Merge "AML: Use config_mediaMetadataBitmapMaxSize which is @SystemApi"
diff --git a/apex/Android.bp b/apex/Android.bp
index 575603f..eee26ae 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -21,14 +21,8 @@
         "libamrextractor",
         "libflacextractor",
         "libmidiextractor",
-        "libmkvextractor",
         "libmp3extractor",
-        "libmp4extractor",
-        "libmpeg2extractor",
-        "liboggextractor",
         "libwavextractor",
-        // MediaPlayer2
-        "libmedia2_jni",
     ],
     key: "com.android.media.key",
 }
diff --git a/camera/Android.bp b/camera/Android.bp
index 24b3918..21588d4 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -43,6 +43,7 @@
         "ICameraRecordingProxyListener.cpp",
         "camera2/CaptureRequest.cpp",
         "camera2/OutputConfiguration.cpp",
+        "camera2/SessionConfiguration.cpp",
         "camera2/SubmitInfo.cpp",
         "CameraBase.cpp",
         "CameraUtils.cpp",
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 4ced08c..49dfde8 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -19,6 +19,7 @@
 import android.hardware.camera2.CaptureRequest;
 import android.hardware.camera2.impl.CameraMetadataNative;
 import android.hardware.camera2.params.OutputConfiguration;
+import android.hardware.camera2.params.SessionConfiguration;
 import android.hardware.camera2.utils.SubmitInfo;
 import android.view.Surface;
 
@@ -83,6 +84,16 @@
      */
     void endConfigure(int operatingMode, in CameraMetadataNative sessionParams);
 
+    /**
+     * Check whether a particular session configuration has camera device
+     * support.
+     *
+     * @param sessionConfiguration Specific session configuration to be verified.
+     * @return true  - in case the stream combination is supported.
+     *         false - in case there is no device support.
+     */
+    boolean isSessionConfigurationSupported(in SessionConfiguration sessionConfiguration);
+
     void deleteStream(int streamId);
 
     /**
diff --git a/camera/aidl/android/hardware/camera2/params/SessionConfiguration.aidl b/camera/aidl/android/hardware/camera2/params/SessionConfiguration.aidl
new file mode 100644
index 0000000..abf1556
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/params/SessionConfiguration.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.params;
+
+/** @hide */
+parcelable SessionConfiguration cpp_header "camera/camera2/SessionConfiguration.h";
diff --git a/camera/camera2/SessionConfiguration.cpp b/camera/camera2/SessionConfiguration.cpp
new file mode 100644
index 0000000..a431a33
--- /dev/null
+++ b/camera/camera2/SessionConfiguration.cpp
@@ -0,0 +1,133 @@
+/*
+**
+** Copyright 2018, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "SessionConfiguration"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <camera/camera2/SessionConfiguration.h>
+#include <camera/camera2/OutputConfiguration.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+status_t SessionConfiguration::readFromParcel(const android::Parcel* parcel) {
+    status_t err = OK;
+    int operatingMode = 0;
+
+    if (parcel == nullptr) return BAD_VALUE;
+
+    if ((err = parcel->readInt32(&operatingMode)) != OK) {
+        ALOGE("%s: Failed to read operating mode from parcel", __FUNCTION__);
+        return err;
+    }
+
+    int inputWidth = 0;
+    if ((err = parcel->readInt32(&inputWidth)) != OK) {
+        ALOGE("%s: Failed to read input width from parcel", __FUNCTION__);
+        return err;
+    }
+
+    int inputHeight = 0;
+    if ((err = parcel->readInt32(&inputHeight)) != OK) {
+        ALOGE("%s: Failed to read input height from parcel", __FUNCTION__);
+        return err;
+    }
+
+    int inputFormat = -1;
+    if ((err = parcel->readInt32(&inputFormat)) != OK) {
+        ALOGE("%s: Failed to read input format from parcel", __FUNCTION__);
+        return err;
+    }
+
+    std::vector<OutputConfiguration> outputStreams;
+    if ((err = parcel->readParcelableVector(&outputStreams)) != OK) {
+        ALOGE("%s: Failed to read output configurations from parcel", __FUNCTION__);
+        return err;
+    }
+
+    mOperatingMode = operatingMode;
+    mInputWidth = inputWidth;
+    mInputHeight = inputHeight;
+    mInputFormat = inputFormat;
+    for (auto& stream : outputStreams) {
+        mOutputStreams.push_back(stream);
+    }
+
+
+    return err;
+}
+
+status_t SessionConfiguration::writeToParcel(android::Parcel* parcel) const {
+
+    if (parcel == nullptr) return BAD_VALUE;
+    status_t err = OK;
+
+    err = parcel->writeInt32(mOperatingMode);
+    if (err != OK) return err;
+
+    err = parcel->writeInt32(mInputWidth);
+    if (err != OK) return err;
+
+    err = parcel->writeInt32(mInputHeight);
+    if (err != OK) return err;
+
+    err = parcel->writeInt32(mInputFormat);
+    if (err != OK) return err;
+
+    err = parcel->writeParcelableVector(mOutputStreams);
+    if (err != OK) return err;
+
+    return OK;
+}
+
+bool SessionConfiguration::outputsEqual(const SessionConfiguration& other) const {
+    const std::vector<OutputConfiguration>& otherOutputStreams =
+            other.getOutputConfigurations();
+
+    if (mOutputStreams.size() !=  otherOutputStreams.size()) {
+        return false;
+    }
+
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+        if (mOutputStreams[i] != otherOutputStreams[i]) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+bool SessionConfiguration::outputsLessThan(const SessionConfiguration& other) const {
+    const std::vector<OutputConfiguration>& otherOutputStreams =
+            other.getOutputConfigurations();
+
+    if (mOutputStreams.size() !=  otherOutputStreams.size()) {
+        return mOutputStreams.size() < otherOutputStreams.size();
+    }
+
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+        if (mOutputStreams[i] != otherOutputStreams[i]) {
+            return mOutputStreams[i] < otherOutputStreams[i];
+        }
+    }
+
+    return false;
+}
+
+}; // namespace android
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index ef6930c..b88a2c5 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -29,6 +29,7 @@
         "android.hardware.camera.provider@2.4",
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
+        "android.hardware.camera.device@3.4",
     ],
     compile_multilib: "32",
     cflags: [
diff --git a/camera/include/camera/camera2/SessionConfiguration.h b/camera/include/camera/camera2/SessionConfiguration.h
new file mode 100644
index 0000000..64288ed
--- /dev/null
+++ b/camera/include/camera/camera2/SessionConfiguration.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA2_SESSIONCONFIGURATION_H
+#define ANDROID_HARDWARE_CAMERA2_SESSIONCONFIGURATION_H
+
+#include <binder/Parcelable.h>
+
+namespace android {
+
+namespace hardware {
+namespace camera2 {
+namespace params {
+
+class OutputConfiguration;
+
+class SessionConfiguration : public android::Parcelable {
+public:
+
+    const std::vector<OutputConfiguration>& getOutputConfigurations() const {
+        return mOutputStreams;
+    }
+
+    int getInputWidth() const { return mInputWidth; }
+    int getInputHeight() const { return mInputHeight; }
+    int getInputFormat() const { return mInputFormat; }
+    int getOperatingMode() const { return mOperatingMode; }
+
+    virtual status_t writeToParcel(android::Parcel* parcel) const override;
+    virtual status_t readFromParcel(const android::Parcel* parcel) override;
+
+    SessionConfiguration() :
+            mInputWidth(0),
+            mInputHeight(0),
+            mInputFormat(-1),
+            mOperatingMode(-1) {}
+
+    SessionConfiguration(const android::Parcel& parcel) {
+        readFromParcel(&parcel);
+    }
+
+    SessionConfiguration(int inputWidth, int inputHeight, int inputFormat, int operatingMode) :
+        mInputWidth(inputWidth), mInputHeight(inputHeight), mInputFormat(inputFormat),
+        mOperatingMode(operatingMode) {}
+
+    bool operator == (const SessionConfiguration& other) const {
+        return (outputsEqual(other) &&
+                mInputWidth == other.mInputWidth &&
+                mInputHeight == other.mInputHeight &&
+                mInputFormat == other.mInputFormat &&
+                mOperatingMode == other.mOperatingMode);
+    }
+
+    bool operator != (const SessionConfiguration& other) const {
+        return !(*this == other);
+    }
+
+    bool operator < (const SessionConfiguration& other) const {
+        if (*this == other) return false;
+
+        if (mInputWidth != other.mInputWidth) {
+            return mInputWidth < other.mInputWidth;
+        }
+
+        if (mInputHeight != other.mInputHeight) {
+            return mInputHeight < other.mInputHeight;
+        }
+
+        if (mInputFormat != other.mInputFormat) {
+            return mInputFormat < other.mInputFormat;
+        }
+
+        if (mOperatingMode != other.mOperatingMode) {
+            return mOperatingMode < other.mOperatingMode;
+        }
+
+        return outputsLessThan(other);
+    }
+
+    bool operator > (const SessionConfiguration& other) const {
+        return (*this != other && !(*this < other));
+    }
+
+    bool outputsEqual(const SessionConfiguration& other) const;
+    bool outputsLessThan(const SessionConfiguration& other) const;
+    void addOutputConfiguration(const OutputConfiguration &config) {
+        mOutputStreams.push_back(config);
+    }
+
+private:
+
+    std::vector<OutputConfiguration> mOutputStreams;
+    int                              mInputWidth, mInputHeight, mInputFormat, mOperatingMode;
+};
+} // namespace params
+} // namespace camera2
+} // namespace hardware
+
+using hardware::camera2::params::SessionConfiguration;
+
+}; // namespace android
+
+#endif
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index b86f854..26e6b3c 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -754,8 +754,8 @@
         const camera_metadata_t *params_metadata = params.getAndLock();
         utils::convertToHidl(params_metadata, &hidlParams);
         params.unlock(params_metadata);
-        remoteRet = mRemote->endConfigure(StreamConfigurationMode::NORMAL_MODE, hidlParams);
     }
+    remoteRet = mRemote->endConfigure(StreamConfigurationMode::NORMAL_MODE, hidlParams);
     if (!remoteRet.isOk()) {
         ALOGE("Transaction error: endConfigure failed %s", remoteRet.description().c_str());
     }
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 1de7013..fa8a7a3 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -29,6 +29,7 @@
 #include <utils/Condition.h>
 #include <utils/Mutex.h>
 #include <system/graphics.h>
+#include <hardware/camera3.h>
 #include <hardware/gralloc.h>
 
 #include <camera/CameraMetadata.h>
@@ -40,6 +41,7 @@
 #include <android/hardware/camera2/BnCameraDeviceCallbacks.h>
 #include <camera/camera2/CaptureRequest.h>
 #include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
 
 #include <gui/BufferItemConsumer.h>
@@ -55,6 +57,8 @@
 #include <algorithm>
 
 using namespace android;
+using ::android::hardware::ICameraServiceDefault;
+using ::android::hardware::camera2::ICameraDeviceUser;
 
 #define ASSERT_NOT_NULL(x) \
     ASSERT_TRUE((x) != nullptr)
@@ -490,6 +494,19 @@
         EXPECT_TRUE(res.isOk()) << res;
         EXPECT_FALSE(callbacks->hadError());
 
+        // Session configuration must also be supported in this case
+        SessionConfiguration sessionConfiguration = { /*inputWidth*/ 0, /*inputHeight*/0,
+                /*inputFormat*/ -1, CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE};
+        sessionConfiguration.addOutputConfiguration(output);
+        bool queryStatus;
+        res = device->isSessionConfigurationSupported(sessionConfiguration, &queryStatus);
+        EXPECT_TRUE(res.isOk() ||
+                (res.serviceSpecificErrorCode() == ICameraServiceDefault::ERROR_INVALID_OPERATION))
+                << res;
+        if (res.isOk()) {
+            EXPECT_TRUE(queryStatus);
+        }
+
         // Can we make requests?
         CameraMetadata requestTemplate;
         res = device->createDefaultRequest(/*preview template*/1,
diff --git a/include/media/MediaExtractorPluginApi.h b/include/media/MediaExtractorPluginApi.h
index 38962e5..b480bbe 100644
--- a/include/media/MediaExtractorPluginApi.h
+++ b/include/media/MediaExtractorPluginApi.h
@@ -69,6 +69,40 @@
     bool     (*supportsNonBlockingRead)(void *data);
 };
 
+/**
+ * Only use CMediaBufferV3 buffers allocated from the CMediaBufferGroupV3 that is
+ * provided to CMediaTrack::start().
+ */
+struct CMediaBufferV3 {
+    void *handle;
+    void (*release)(void *handle);
+    void* (*data)(void *handle);
+    size_t (*size)(void *handle);
+    size_t (*range_offset)(void *handle);
+    size_t (*range_length)(void *handle);
+    void (*set_range)(void *handle, size_t offset, size_t length);
+    AMediaFormat* (*meta_data)(void *handle);
+};
+
+struct CMediaBufferGroupV3 {
+    void *handle;
+    bool (*init)(void *handle, size_t buffers, size_t buffer_size, size_t growthLimit);
+    void (*add_buffer)(void *handle, size_t size);
+    media_status_t (*acquire_buffer)(void *handle,
+            CMediaBufferV3 **buffer, bool nonBlocking, size_t requestedSize);
+    bool (*has_buffers)(void *handle);
+};
+
+struct CMediaTrackV3 {
+    void *data;
+    void (*free)(void *data);
+
+    media_status_t (*start)(void *data, CMediaBufferGroupV3 *bufferGroup);
+    media_status_t (*stop)(void *data);
+    media_status_t (*getFormat)(void *data, AMediaFormat *format);
+    media_status_t (*read)(void *data, CMediaBufferV3 **buffer, uint32_t options, int64_t seekPosUs);
+    bool     (*supportsNonBlockingRead)(void *data);
+};
 
 struct CMediaExtractorV1 {
     void *data;
@@ -104,6 +138,23 @@
     const char * (*name)(void *data);
 };
 
+struct CMediaExtractorV3 {
+    void *data;
+
+    void (*free)(void *data);
+    size_t (*countTracks)(void *data);
+    CMediaTrackV3* (*getTrack)(void *data, size_t index);
+    media_status_t (*getTrackMetaData)(
+            void *data,
+            AMediaFormat *meta,
+            size_t index, uint32_t flags);
+
+    media_status_t (*getMetaData)(void *data, AMediaFormat *meta);
+    uint32_t (*flags)(void *data);
+    media_status_t (*setMediaCas)(void *data, const uint8_t* casToken, size_t size);
+    const char * (*name)(void *data);
+};
+
 typedef CMediaExtractorV1* (*CreatorFuncV1)(CDataSource *source, void *meta);
 typedef void (*FreeMetaFunc)(void *meta);
 
@@ -121,6 +172,12 @@
         CDataSource *source, float *confidence,
         void **meta, FreeMetaFunc *freeMeta);
 
+typedef CMediaExtractorV3* (*CreatorFuncV3)(CDataSource *source, void *meta);
+
+typedef CreatorFuncV3 (*SnifferFuncV3)(
+        CDataSource *source, float *confidence,
+        void **meta, FreeMetaFunc *freeMeta);
+
 typedef CMediaExtractorV1 CMediaExtractor;
 typedef CreatorFuncV1 CreatorFunc;
 
@@ -148,6 +205,7 @@
     union {
         SnifferFuncV1 v1;
         SnifferFuncV2 v2;
+        SnifferFuncV3 v3;
     } sniff;
 };
 
diff --git a/include/media/MediaExtractorPluginHelper.h b/include/media/MediaExtractorPluginHelper.h
index 858c575..292ec93 100644
--- a/include/media/MediaExtractorPluginHelper.h
+++ b/include/media/MediaExtractorPluginHelper.h
@@ -20,12 +20,13 @@
 
 #include <arpa/inet.h>
 #include <stdio.h>
-#include <vector>
+#include <map>
 
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/RefBase.h>
 #include <media/MediaExtractorPluginApi.h>
+#include <media/NdkMediaFormat.h>
 
 namespace android {
 
@@ -171,6 +172,176 @@
     return wrapper;
 }
 
+class MediaTrackHelperV3;
+
+class MediaBufferHelperV3 {
+private:
+    friend CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *);
+    CMediaBufferV3 *mBuffer;
+public:
+    MediaBufferHelperV3(CMediaBufferV3 *buf) {
+        mBuffer = buf;
+    }
+
+    ~MediaBufferHelperV3() {}
+
+    void release() {
+        mBuffer->release(mBuffer->handle);
+    }
+
+    void* data() {
+        return mBuffer->data(mBuffer->handle);
+    }
+
+    size_t size() {
+        return mBuffer->size(mBuffer->handle);
+    }
+
+    size_t range_offset() {
+        return mBuffer->range_offset(mBuffer->handle);
+    }
+
+    size_t range_length() {
+        return mBuffer->range_length(mBuffer->handle);
+    }
+
+    void set_range(size_t offset, size_t length) {
+        mBuffer->set_range(mBuffer->handle, offset, length);
+    }
+    AMediaFormat *meta_data() {
+        return mBuffer->meta_data(mBuffer->handle);
+    }
+};
+
+class MediaBufferGroupHelperV3 {
+private:
+    CMediaBufferGroupV3 *mGroup;
+    std::map<CMediaBufferV3*, MediaBufferHelperV3*> mBufferHelpers;
+public:
+    MediaBufferGroupHelperV3(CMediaBufferGroupV3 *group) {
+        mGroup = group;
+    }
+    ~MediaBufferGroupHelperV3() {
+        // delete all entries in map
+        ALOGV("buffergroup %p map has %zu entries", this, mBufferHelpers.size());
+        for (auto it = mBufferHelpers.begin(); it != mBufferHelpers.end(); ++it) {
+            delete it->second;
+        }
+    }
+    bool init(size_t buffers, size_t buffer_size, size_t growthLimit = 0) {
+        return mGroup->init(mGroup->handle, buffers, buffer_size, growthLimit);
+    }
+    void add_buffer(size_t size) {
+        mGroup->add_buffer(mGroup->handle, size);
+    }
+    media_status_t acquire_buffer(
+            MediaBufferHelperV3 **buffer, bool nonBlocking = false, size_t requestedSize = 0) {
+        CMediaBufferV3 *buf = nullptr;
+        media_status_t ret =
+                mGroup->acquire_buffer(mGroup->handle, &buf, nonBlocking, requestedSize);
+        if (ret == AMEDIA_OK && buf != nullptr) {
+            auto helper = mBufferHelpers.find(buf);
+            if (helper == mBufferHelpers.end()) {
+                MediaBufferHelperV3* newHelper = new MediaBufferHelperV3(buf);
+                mBufferHelpers.insert(std::make_pair(buf, newHelper));
+                *buffer = newHelper;
+            } else {
+                *buffer = helper->second;
+            }
+        } else {
+            *buffer = nullptr;
+        }
+        return ret;
+    }
+    bool has_buffers() {
+        return mGroup->has_buffers(mGroup->handle);
+    }
+};
+
+class MediaTrackHelperV3 {
+public:
+    MediaTrackHelperV3() : mBufferGroup(nullptr) {
+    }
+    virtual ~MediaTrackHelperV3() {
+        delete mBufferGroup;
+    }
+    virtual media_status_t start() = 0;
+    virtual media_status_t stop() = 0;
+    virtual media_status_t getFormat(AMediaFormat *format) = 0;
+
+    class ReadOptions {
+    public:
+        enum SeekMode : int32_t {
+            SEEK_PREVIOUS_SYNC,
+            SEEK_NEXT_SYNC,
+            SEEK_CLOSEST_SYNC,
+            SEEK_CLOSEST,
+            SEEK_FRAME_INDEX,
+        };
+
+        ReadOptions(uint32_t options, int64_t seekPosUs) {
+            mOptions = options;
+            mSeekPosUs = seekPosUs;
+        }
+        bool getSeekTo(int64_t *time_us, SeekMode *mode) const {
+            if ((mOptions & CMediaTrackReadOptions::SEEK) == 0) {
+                return false;
+            }
+            *time_us = mSeekPosUs;
+            *mode = (SeekMode) (mOptions & 7);
+            return true;
+        }
+        bool getNonBlocking() const {
+            return mOptions & CMediaTrackReadOptions::NONBLOCKING;
+        }
+    private:
+        uint32_t mOptions;
+        int64_t mSeekPosUs;
+    };
+
+    virtual media_status_t read(
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL) = 0;
+    virtual bool supportsNonBlockingRead() { return false; }
+protected:
+    friend CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *track);
+    MediaBufferGroupHelperV3 *mBufferGroup;
+};
+
+inline CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *track) {
+    CMediaTrackV3 *wrapper = (CMediaTrackV3*) malloc(sizeof(CMediaTrackV3));
+    wrapper->data = track;
+    wrapper->free = [](void *data) -> void {
+        delete (MediaTrackHelperV3*)(data);
+    };
+    wrapper->start = [](void *data, CMediaBufferGroupV3 *bufferGroup) -> media_status_t {
+        if (((MediaTrackHelperV3*)data)->mBufferGroup) {
+            // this shouldn't happen, but handle it anyway
+            delete ((MediaTrackHelperV3*)data)->mBufferGroup;
+        }
+        ((MediaTrackHelperV3*)data)->mBufferGroup = new MediaBufferGroupHelperV3(bufferGroup);
+        return ((MediaTrackHelperV3*)data)->start();
+    };
+    wrapper->stop = [](void *data) -> media_status_t {
+        return ((MediaTrackHelperV3*)data)->stop();
+    };
+    wrapper->getFormat = [](void *data, AMediaFormat *meta) -> media_status_t {
+        return ((MediaTrackHelperV3*)data)->getFormat(meta);
+    };
+    wrapper->read = [](void *data, CMediaBufferV3 **buffer,  uint32_t options, int64_t seekPosUs)
+            -> media_status_t {
+        MediaTrackHelperV3::ReadOptions opts(options, seekPosUs);
+        MediaBufferHelperV3 *buf = NULL;
+        media_status_t ret = ((MediaTrackHelperV3*)data)->read(&buf, &opts);
+        if (ret == AMEDIA_OK && buf != nullptr) {
+            *buffer = buf->mBuffer;
+        }
+        return ret;
+    };
+    wrapper->supportsNonBlockingRead = [](void *data) -> bool {
+                return ((MediaTrackHelperV3*)data)->supportsNonBlockingRead();
+    };
+    return wrapper;
+}
 
 
 // extractor plugins can derive from this class which looks remarkably
@@ -341,6 +512,89 @@
     return wrapper;
 }
 
+class MediaExtractorPluginHelperV3
+{
+public:
+    virtual ~MediaExtractorPluginHelperV3() {}
+    virtual size_t countTracks() = 0;
+    virtual MediaTrackHelperV3 *getTrack(size_t index) = 0;
+
+    enum GetTrackMetaDataFlags {
+        kIncludeExtensiveMetaData = 1
+    };
+    virtual media_status_t getTrackMetaData(
+            AMediaFormat *meta,
+            size_t index, uint32_t flags = 0) = 0;
+
+    // Return container specific meta-data. The default implementation
+    // returns an empty metadata object.
+    virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
+
+    enum Flags {
+        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
+        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
+        CAN_PAUSE          = 4,
+        CAN_SEEK           = 8,  // the "seek bar"
+    };
+
+    // If subclasses do _not_ override this, the default is
+    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
+    virtual uint32_t flags() const {
+        return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
+    };
+
+    virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
+        return AMEDIA_ERROR_INVALID_OPERATION;
+    }
+
+    virtual const char * name() { return "<unspecified>"; }
+
+protected:
+    MediaExtractorPluginHelperV3() {}
+
+private:
+    MediaExtractorPluginHelperV3(const MediaExtractorPluginHelperV3 &);
+    MediaExtractorPluginHelperV3 &operator=(const MediaExtractorPluginHelperV3 &);
+};
+
+inline CMediaExtractorV3 *wrapV3(MediaExtractorPluginHelperV3 *extractor) {
+    CMediaExtractorV3 *wrapper = (CMediaExtractorV3*) malloc(sizeof(CMediaExtractorV3));
+    wrapper->data = extractor;
+    wrapper->free = [](void *data) -> void {
+        delete (MediaExtractorPluginHelperV3*)(data);
+    };
+    wrapper->countTracks = [](void *data) -> size_t {
+        return ((MediaExtractorPluginHelperV3*)data)->countTracks();
+    };
+    wrapper->getTrack = [](void *data, size_t index) -> CMediaTrackV3* {
+        return wrapV3(((MediaExtractorPluginHelperV3*)data)->getTrack(index));
+    };
+    wrapper->getTrackMetaData = [](
+            void *data,
+            AMediaFormat *meta,
+            size_t index, uint32_t flags) -> media_status_t {
+        return ((MediaExtractorPluginHelperV3*)data)->getTrackMetaData(meta, index, flags);
+    };
+    wrapper->getMetaData = [](
+            void *data,
+            AMediaFormat *meta) -> media_status_t {
+        return ((MediaExtractorPluginHelperV3*)data)->getMetaData(meta);
+    };
+    wrapper->flags = [](
+            void *data) -> uint32_t {
+        return ((MediaExtractorPluginHelperV3*)data)->flags();
+    };
+    wrapper->setMediaCas = [](
+            void *data, const uint8_t *casToken, size_t size) -> media_status_t {
+        return ((MediaExtractorPluginHelperV3*)data)->setMediaCas(casToken, size);
+    };
+    wrapper->name = [](
+            void *data) -> const char * {
+        return ((MediaExtractorPluginHelperV3*)data)->name();
+    };
+    return wrapper;
+}
+
 /* adds some convenience methods */
 class DataSourceHelper {
 public:
diff --git a/include/media/MediaMetrics.h b/include/media/MediaMetrics.h
new file mode 120000
index 0000000..5f757e4
--- /dev/null
+++ b/include/media/MediaMetrics.h
@@ -0,0 +1 @@
+../../media/libmediametrics/include/MediaMetrics.h
\ No newline at end of file
diff --git a/include/media/MediaTrack.h b/include/media/MediaTrack.h
index ee3591e..baa3410 100644
--- a/include/media/MediaTrack.h
+++ b/include/media/MediaTrack.h
@@ -23,6 +23,7 @@
 #include <binder/IMemory.h>
 #include <binder/MemoryDealer.h>
 #include <media/MediaExtractorPluginApi.h>
+#include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/MediaExtractorPluginApi.h>
@@ -175,6 +176,25 @@
     CMediaTrackV2 *wrapper;
 };
 
+class MediaTrackCUnwrapperV3 : public MediaTrack {
+public:
+    explicit MediaTrackCUnwrapperV3(CMediaTrackV3 *wrapper);
+
+    virtual status_t start();
+    virtual status_t stop();
+    virtual status_t getFormat(MetaDataBase& format);
+    virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+    virtual bool supportNonblockingRead();
+
+protected:
+    virtual ~MediaTrackCUnwrapperV3();
+
+private:
+    CMediaTrackV3 *wrapper;
+    MediaBufferGroup *bufferGroup;
+};
+
 }  // namespace android
 
 #endif  // MEDIA_SOURCE_BASE_H_
diff --git a/include/media/NdkMediaErrorPriv.h b/include/media/NdkMediaErrorPriv.h
index f5e2f02..3bbba79 100644
--- a/include/media/NdkMediaErrorPriv.h
+++ b/include/media/NdkMediaErrorPriv.h
@@ -20,10 +20,8 @@
 #include <media/NdkMediaError.h>
 #include <utils/Errors.h>
 
-using namespace android;
+media_status_t translate_error(android::status_t);
 
-media_status_t translate_error(status_t);
-
-status_t reverse_translate_error(media_status_t);
+android::status_t reverse_translate_error(media_status_t);
 
 #endif // _NDK_MEDIA_ERROR_PRIV_H
diff --git a/include/media/NdkMediaFormatPriv.h b/include/media/NdkMediaFormatPriv.h
index 6c452c3..1fda4a8 100644
--- a/include/media/NdkMediaFormatPriv.h
+++ b/include/media/NdkMediaFormatPriv.h
@@ -27,6 +27,7 @@
 #ifndef _NDK_MEDIA_FORMAT_PRIV_H
 #define _NDK_MEDIA_FORMAT_PRIV_H
 
+#include <utils/KeyedVector.h>
 #include <utils/String8.h>
 #include <utils/StrongPointer.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -35,20 +36,22 @@
 extern "C" {
 #endif
 
-using namespace android;
-
 struct AMediaFormat {
-    sp<AMessage> mFormat;
-    String8 mDebug;
-    KeyedVector<String8, String8> mStringCache;
+    android::sp<android::AMessage> mFormat;
+    android::String8 mDebug;
+    android::KeyedVector<android::String8, android::String8> mStringCache;
 };
 
-AMediaFormat* AMediaFormat_fromMsg(const void*);
-void AMediaFormat_getFormat(const AMediaFormat* mData, void* dest);
-
 #ifdef __cplusplus
 } // extern "C"
 #endif
 
+namespace android {
+
+AMediaFormat* AMediaFormat_fromMsg(sp<AMessage> *);
+void AMediaFormat_getFormat(const AMediaFormat* mData, sp<AMessage> *dest);
+
+} // namespace android
+
 #endif // _NDK_MEDIA_FORMAT_PRIV_H
 
diff --git a/include/media/VolumeShaper.h b/include/media/VolumeShaper.h
index a3aaece..79afd6c 100644
--- a/include/media/VolumeShaper.h
+++ b/include/media/VolumeShaper.h
@@ -551,7 +551,7 @@
 
     static int64_t convertTimespecToUs(const struct timespec &tv)
     {
-        return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
+        return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
     }
 
     // current monotonic time in microseconds.
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 0165314..2c734ac 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -282,7 +282,7 @@
 
 Accessor::Impl::Impl::BufferPool::~BufferPool() {
     std::lock_guard<std::mutex> lock(mMutex);
-    ALOGD("Destruction - bufferpool %p "
+    ALOGD("Destruction - bufferpool2 %p "
           "cached: %zu/%zuM, %zu/%d%% in use; "
           "allocs: %zu, %d%% recycled; "
           "transfers: %zu, %d%% unfetced",
@@ -353,12 +353,12 @@
             msgId = ++mInvalidationId;
         }
     }
-    ALOGV("bufferpool invalidation requested and queued");
+    ALOGV("bufferpool2 invalidation requested and queued");
     if (left == 0) {
         channel.postInvalidation(msgId, from, to);
     } else {
         // TODO: sending hint message?
-        ALOGV("bufferpool invalidation requested and pending");
+        ALOGV("bufferpool2 invalidation requested and pending");
         Pending pending(needsAck, from, to, left, impl);
         mPendings.push_back(pending);
     }
@@ -380,7 +380,7 @@
                     // lost.
                     it->second = mInvalidationId;
                 } else {
-                    ALOGV("bufferpool observer died %lld", (long long)it->first);
+                    ALOGV("bufferpool2 observer died %lld", (long long)it->first);
                     deads.insert(it->first);
                 }
             }
@@ -682,7 +682,7 @@
         mLastCleanUpUs = mTimestampUs;
         if (mTimestampUs > mLastLogUs + kLogDurationUs) {
             mLastLogUs = mTimestampUs;
-            ALOGD("bufferpool %p : %zu(%zu size) total buffers - "
+            ALOGD("bufferpool2 %p : %zu(%zu size) total buffers - "
                   "%zu(%zu size) used buffers - %zu/%zu (recycle/alloc) - "
                   "%zu/%zu (fetch/transfer)",
                   this, mStats.mBuffersCached, mStats.mSizeCached,
@@ -703,7 +703,7 @@
                 freeIt = mFreeBuffers.erase(freeIt);
             } else {
                 ++freeIt;
-                ALOGW("bufferpool inconsistent!");
+                ALOGW("bufferpool2 inconsistent!");
             }
         }
     }
@@ -722,7 +722,7 @@
                 freeIt = mFreeBuffers.erase(freeIt);
                 continue;
             } else {
-                ALOGW("bufferpool inconsistent!");
+                ALOGW("bufferpool2 inconsistent!");
             }
         }
         ++freeIt;
diff --git a/media/bufferpool/2.0/BufferPoolClient.cpp b/media/bufferpool/2.0/BufferPoolClient.cpp
index 5564a13..f907de5 100644
--- a/media/bufferpool/2.0/BufferPoolClient.cpp
+++ b/media/bufferpool/2.0/BufferPoolClient.cpp
@@ -811,7 +811,7 @@
 }
 
 void BufferPoolClient::receiveInvalidation(uint32_t msgId) {
-    ALOGV("bufferpool client recv inv %u", msgId);
+    ALOGV("bufferpool2 client recv inv %u", msgId);
     if (isValid()) {
         mImpl->receiveInvalidation(msgId);
     }
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index 06bb81a..d02f541 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -31,9 +31,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 7990ee5..50b4d20 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -489,6 +489,13 @@
     }
 
     ALOGV("start processing frame #%" PRIu64, work->input.ordinal.frameIndex.peeku());
+    // If the input buffer list is not empty, we have some input to process.
+    // However, the input could be a null buffer. In that case, clear the buffer
+    // list before calling process().
+    if (!work->input.buffers.empty() && !work->input.buffers[0]) {
+        ALOGD("Encountered null input buffer. Clearing the input buffer");
+        work->input.buffers.clear();
+    }
     process(work, mOutputBlockPool);
     ALOGV("processed frame #%" PRIu64, work->input.ordinal.frameIndex.peeku());
     {
diff --git a/media/codec2/components/cmds/Android.bp b/media/codec2/components/cmds/Android.bp
index 994dfee..6b0977b 100644
--- a/media/codec2/components/cmds/Android.bp
+++ b/media/codec2/components/cmds/Android.bp
@@ -33,8 +33,5 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 8d2a652..5c83481 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -83,6 +83,18 @@
                 DefineParam(mInputMaxBufSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
                 .withConstValue(new C2StreamMaxBufferSizeInfo::input(0u, 64 * 1024))
                 .build());
+
+        addParameter(
+                DefineParam(mPcmEncodingInfo, C2_PARAMKEY_PCM_ENCODING)
+                .withDefault(new C2StreamPcmEncodingInfo::output(0u, C2Config::PCM_16))
+                .withFields({C2F(mPcmEncodingInfo, value).oneOf({
+                     C2Config::PCM_16,
+                     C2Config::PCM_8,
+                     C2Config::PCM_FLOAT})
+                })
+                .withSetter((Setter<decltype(*mPcmEncodingInfo)>::StrictValueWithNoDeps))
+                .build());
+
     }
 
 private:
@@ -94,6 +106,7 @@
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
     std::shared_ptr<C2BitrateTuning::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
+    std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
 };
 
 C2SoftRawDec::C2SoftRawDec(
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 01de681..8ecbf5d 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -97,6 +97,26 @@
                 .withSetter(ProfileLevelSetter, mSize)
                 .build());
 
+        mHdr10PlusInfoInput = C2StreamHdr10PlusInfo::input::AllocShared(0);
+        addParameter(
+                DefineParam(mHdr10PlusInfoInput, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO)
+                .withDefault(mHdr10PlusInfoInput)
+                .withFields({
+                    C2F(mHdr10PlusInfoInput, m.value).any(),
+                })
+                .withSetter(Hdr10PlusInfoInputSetter)
+                .build());
+
+        mHdr10PlusInfoOutput = C2StreamHdr10PlusInfo::output::AllocShared(0);
+        addParameter(
+                DefineParam(mHdr10PlusInfoOutput, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO)
+                .withDefault(mHdr10PlusInfoOutput)
+                .withFields({
+                    C2F(mHdr10PlusInfoOutput, m.value).any(),
+                })
+                .withSetter(Hdr10PlusInfoOutputSetter)
+                .build());
+
 #if 0
         // sample BT.2020 static info
         mHdrStaticInfo = std::make_shared<C2StreamHdrStaticInfo::output>();
@@ -217,6 +237,18 @@
         return C2R::Ok();
     }
 
+    static C2R Hdr10PlusInfoInputSetter(bool mayBlock, C2P<C2StreamHdr10PlusInfo::input> &me) {
+        (void)mayBlock;
+        (void)me;  // TODO: validate
+        return C2R::Ok();
+    }
+
+    static C2R Hdr10PlusInfoOutputSetter(bool mayBlock, C2P<C2StreamHdr10PlusInfo::output> &me) {
+        (void)mayBlock;
+        (void)me;  // TODO: validate
+        return C2R::Ok();
+    }
+
 private:
     std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
     std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
@@ -228,6 +260,8 @@
 #if 0
     std::shared_ptr<C2StreamHdrStaticInfo::output> mHdrStaticInfo;
 #endif
+    std::shared_ptr<C2StreamHdr10PlusInfo::input> mHdr10PlusInfoInput;
+    std::shared_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfoOutput;
 #endif
 };
 
@@ -370,7 +404,8 @@
                            const std::shared_ptr<C2GraphicBlock> &block) {
     std::shared_ptr<C2Buffer> buffer = createGraphicBuffer(block,
                                                            C2Rect(mWidth, mHeight));
-    auto fillWork = [buffer, index](const std::unique_ptr<C2Work> &work) {
+    auto fillWork = [buffer, index, intf = this->mIntf](
+            const std::unique_ptr<C2Work> &work) {
         uint32_t flags = 0;
         if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
                 (c2_cntr64_t(index) == work->input.ordinal.frameIndex)) {
@@ -382,6 +417,28 @@
         work->worklets.front()->output.buffers.push_back(buffer);
         work->worklets.front()->output.ordinal = work->input.ordinal;
         work->workletsProcessed = 1u;
+
+        for (const std::unique_ptr<C2Param> &param: work->input.configUpdate) {
+            if (param) {
+                C2StreamHdr10PlusInfo::input *hdr10PlusInfo =
+                        C2StreamHdr10PlusInfo::input::From(param.get());
+
+                if (hdr10PlusInfo != nullptr) {
+                    std::vector<std::unique_ptr<C2SettingResult>> failures;
+                    std::unique_ptr<C2Param> outParam = C2Param::CopyAsStream(
+                            *param.get(), true /*output*/, param->stream());
+                    c2_status_t err = intf->config(
+                            { outParam.get() }, C2_MAY_BLOCK, &failures);
+                    if (err == C2_OK) {
+                        work->worklets.front()->output.configUpdate.push_back(
+                                C2Param::Copy(*outParam.get()));
+                    } else {
+                        ALOGE("finishWork: Config update size failed");
+                    }
+                    break;
+                }
+            }
+        }
     };
     if (work && c2_cntr64_t(index) == work->input.ordinal.frameIndex) {
         fillWork(work);
diff --git a/media/codec2/core/include/C2.h b/media/codec2/core/include/C2.h
index 8a55f8d..ef3466d 100644
--- a/media/codec2/core/include/C2.h
+++ b/media/codec2/core/include/C2.h
@@ -289,7 +289,7 @@
      * Convert to a smaller counter type. This is always safe.
      */
     template<typename U, typename E=typename std::enable_if<(sizeof(U) < sizeof(T))>::type>
-    inline operator c2_cntr_t<U>() {
+    inline constexpr operator c2_cntr_t<U>() {
         return c2_cntr_t<U>(mValue);
     }
 
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 799ade4..27aa064 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -194,6 +194,7 @@
     kParamIndexLayerIndex,
     kParamIndexLayerCount,
     kParamIndexIntraRefresh,
+    kParamIndexHdr10PlusMetadata,
 
     /* ------------------------------------ image components ------------------------------------ */
 
@@ -1560,6 +1561,14 @@
         C2StreamHdrStaticInfo;
 constexpr char C2_PARAMKEY_HDR_STATIC_INFO[] = "raw.hdr-static-info";
 
+/**
+ * HDR10+ Metadata Info.
+ */
+typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexHdr10PlusMetadata>
+        C2StreamHdr10PlusInfo;
+constexpr char C2_PARAMKEY_INPUT_HDR10_PLUS_INFO[] = "input.hdr10-plus-info";
+constexpr char C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO[] = "output.hdr10-plus-info";
+
 /* ------------------------------------ block-based coding ----------------------------------- */
 
 /**
diff --git a/media/codec2/faultinjection/Android.bp b/media/codec2/faultinjection/Android.bp
index ade638f..a0ad3ce 100644
--- a/media/codec2/faultinjection/Android.bp
+++ b/media/codec2/faultinjection/Android.bp
@@ -22,9 +22,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index 455de50..c5ad6a0 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -24,7 +24,7 @@
         "android.hardware.graphics.bufferqueue@1.0",
         "android.hardware.graphics.common@1.0",
         "android.hardware.media@1.0",
-        "android.hardware.media.bufferpool@1.0",
+        "android.hardware.media.bufferpool@2.0",
         "android.hardware.media.c2@1.0",
         "android.hardware.media.omx@1.0",
         "libbase",
@@ -35,7 +35,7 @@
         "libhidltransport",
         "libhwbinder",
         "liblog",
-        "libstagefright_bufferpool@1.0",
+        "libstagefright_bufferpool@2.0",
         "libstagefright_bufferqueue_helper",
         "libui",
         "libutils",
@@ -49,7 +49,7 @@
         "android.hardware.media.c2@1.0",
         "libcodec2",
         "libhidlbase",
-        "libstagefright_bufferpool@1.0",
+        "libstagefright_bufferpool@2.0",
         "libstagefright_bufferqueue_helper",
         "libui",
     ],
diff --git a/media/codec2/hidl/1.0/utils/Component.cpp b/media/codec2/hidl/1.0/utils/Component.cpp
index aa4c6b2..5ae1972 100644
--- a/media/codec2/hidl/1.0/utils/Component.cpp
+++ b/media/codec2/hidl/1.0/utils/Component.cpp
@@ -410,7 +410,7 @@
         const std::shared_ptr<C2Component>& component,
         const sp<IComponentListener>& listener,
         const sp<ComponentStore>& store,
-        const sp<::android::hardware::media::bufferpool::V1_0::
+        const sp<::android::hardware::media::bufferpool::V2_0::
         IClientManager>& clientPoolManager) :
     Configurable(new CachedConfigurable(
             std::make_unique<CompIntf>(component->intf()))),
diff --git a/media/codec2/hidl/1.0/utils/ComponentStore.cpp b/media/codec2/hidl/1.0/utils/ComponentStore.cpp
index 1d1bbe0..9c05014 100644
--- a/media/codec2/hidl/1.0/utils/ComponentStore.cpp
+++ b/media/codec2/hidl/1.0/utils/ComponentStore.cpp
@@ -57,7 +57,7 @@
 
 using namespace ::android;
 using ::android::GraphicBufferSource;
-using namespace ::android::hardware::media::bufferpool::V1_0::implementation;
+using namespace ::android::hardware::media::bufferpool::V2_0::implementation;
 
 namespace /* unnamed */ {
 
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
index 36dec1c..0908226 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
@@ -20,7 +20,7 @@
 #include <codec2/hidl/1.0/Configurable.h>
 #include <codec2/hidl/1.0/types.h>
 
-#include <android/hardware/media/bufferpool/1.0/IClientManager.h>
+#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
 #include <android/hardware/media/c2/1.0/IComponentListener.h>
 #include <android/hardware/media/c2/1.0/IComponentStore.h>
 #include <android/hardware/media/c2/1.0/IComponent.h>
@@ -71,7 +71,7 @@
             const std::shared_ptr<C2Component>&,
             const sp<IComponentListener>& listener,
             const sp<ComponentStore>& store,
-            const sp<::android::hardware::media::bufferpool::V1_0::
+            const sp<::android::hardware::media::bufferpool::V2_0::
                 IClientManager>& clientPoolManager);
     c2_status_t status() const;
 
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
index 5821e8a..41e1416 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
@@ -20,7 +20,7 @@
 #include <codec2/hidl/1.0/Component.h>
 #include <codec2/hidl/1.0/Configurable.h>
 #include <android/hardware/media/c2/1.0/IComponentStore.h>
-#include <android/hardware/media/bufferpool/1.0/IClientManager.h>
+#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
 #include <hidl/Status.h>
 
 #include <C2Component.h>
@@ -40,7 +40,7 @@
 namespace V1_0 {
 namespace utils {
 
-using ::android::hardware::media::bufferpool::V1_0::IClientManager;
+using ::android::hardware::media::bufferpool::V2_0::IClientManager;
 
 using ::android::hardware::hidl_array;
 using ::android::hardware::hidl_handle;
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
index 58f5c96..d8a50b6 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
@@ -20,8 +20,8 @@
 #include <chrono>
 
 #include <bufferpool/ClientManager.h>
-#include <android/hardware/media/bufferpool/1.0/IClientManager.h>
-#include <android/hardware/media/bufferpool/1.0/types.h>
+#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
+#include <android/hardware/media/bufferpool/2.0/types.h>
 #include <android/hardware/media/c2/1.0/IComponentStore.h>
 #include <android/hardware/media/c2/1.0/types.h>
 #include <gui/IGraphicBufferProducer.h>
@@ -46,7 +46,7 @@
 using ::android::hardware::hidl_vec;
 using ::android::status_t;
 using ::android::sp;
-using ::android::hardware::media::bufferpool::V1_0::implementation::
+using ::android::hardware::media::bufferpool::V2_0::implementation::
         ConnectionId;
 using ::android::IGraphicBufferProducer;
 
@@ -131,9 +131,9 @@
 // Abstract class to be used in
 // objcpy(std::list<std::unique_ptr<C2Work>> -> WorkBundle).
 struct BufferPoolSender {
-    typedef ::android::hardware::media::bufferpool::V1_0::
+    typedef ::android::hardware::media::bufferpool::V2_0::
             ResultStatus ResultStatus;
-    typedef ::android::hardware::media::bufferpool::V1_0::
+    typedef ::android::hardware::media::bufferpool::V2_0::
             BufferStatusMessage BufferStatusMessage;
     typedef ::android::hardware::media::bufferpool::
             BufferPoolData BufferPoolData;
@@ -151,7 +151,7 @@
      *    other means so it can call receive() properly.
      * \return ResultStatus value that determines the success of the operation.
      *    (See the possible values of ResultStatus in
-     *    hardware/interfaces/media/bufferpool/1.0/types.hal.)
+     *    hardware/interfaces/media/bufferpool/2.0/types.hal.)
      */
     virtual ResultStatus send(
             const std::shared_ptr<BufferPoolData>& bpData,
@@ -168,9 +168,9 @@
 // IClientManager::registerSender() to establish the bufferpool connection when
 // send() is called.
 struct DefaultBufferPoolSender : BufferPoolSender {
-    typedef ::android::hardware::media::bufferpool::V1_0::implementation::
+    typedef ::android::hardware::media::bufferpool::V2_0::implementation::
             ClientManager ClientManager;
-    typedef ::android::hardware::media::bufferpool::V1_0::
+    typedef ::android::hardware::media::bufferpool::V2_0::
             IClientManager IClientManager;
 
     // Set the IClientManager instance of the receiving process and the refresh
@@ -278,7 +278,7 @@
  * \param BufferPool status
  * \return Corresponding c2_status_t
  */
-c2_status_t toC2Status(::android::hardware::media::bufferpool::V1_0::
+c2_status_t toC2Status(::android::hardware::media::bufferpool::V2_0::
         ResultStatus rs);
 
 // BufferQueue-Based Block Operations
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 5827504..c053bc1 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -49,11 +49,11 @@
 using namespace ::android;
 using ::android::hardware::Return;
 using ::android::hardware::media::bufferpool::BufferPoolData;
-using ::android::hardware::media::bufferpool::V1_0::BufferStatusMessage;
-using ::android::hardware::media::bufferpool::V1_0::ResultStatus;
-using ::android::hardware::media::bufferpool::V1_0::implementation::
+using ::android::hardware::media::bufferpool::V2_0::BufferStatusMessage;
+using ::android::hardware::media::bufferpool::V2_0::ResultStatus;
+using ::android::hardware::media::bufferpool::V2_0::implementation::
         ClientManager;
-using ::android::hardware::media::bufferpool::V1_0::implementation::
+using ::android::hardware::media::bufferpool::V2_0::implementation::
         TransactionId;
 using ::android::TWGraphicBufferProducer;
 
diff --git a/media/codec2/hidl/client/Android.bp b/media/codec2/hidl/client/Android.bp
index bdc02d2..a2a498d 100644
--- a/media/codec2/hidl/client/Android.bp
+++ b/media/codec2/hidl/client/Android.bp
@@ -7,7 +7,7 @@
 
     shared_libs: [
         "android.hardware.graphics.bufferqueue@1.0",
-        "android.hardware.media.bufferpool@1.0",
+        "android.hardware.media.bufferpool@2.0",
         "android.hardware.media.c2@1.0",
         "libbase",
         "libbinder",
@@ -19,7 +19,7 @@
         "libhidlbase",
         "libhidltransport",
         "liblog",
-        "libstagefright_bufferpool@1.0",
+        "libstagefright_bufferpool@2.0",
         "libstagefright_bufferqueue_helper",
         "libui",
         "libutils",
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index ddeb4ff..ff3e534 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -34,7 +34,7 @@
 #include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
 #undef LOG
 
-#include <android/hardware/media/bufferpool/1.0/IClientManager.h>
+#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
 #include <android/hardware/media/c2/1.0/IComponent.h>
 #include <android/hardware/media/c2/1.0/IComponentInterface.h>
 #include <android/hardware/media/c2/1.0/IComponentListener.h>
@@ -55,8 +55,8 @@
 
 using namespace ::android::hardware::media::c2::V1_0;
 using namespace ::android::hardware::media::c2::V1_0::utils;
-using namespace ::android::hardware::media::bufferpool::V1_0;
-using namespace ::android::hardware::media::bufferpool::V1_0::implementation;
+using namespace ::android::hardware::media::bufferpool::V2_0;
+using namespace ::android::hardware::media::bufferpool::V2_0::implementation;
 
 namespace /* unnamed */ {
 
@@ -76,7 +76,11 @@
 
 // Convenience methods to obtain known clients.
 std::shared_ptr<Codec2Client> getClient(size_t index) {
-    return Codec2Client::CreateFromService(kClientNames[index]);
+    uint32_t serviceMask = ::android::base::GetUintProperty(
+            "debug.media.codec2", uint32_t(0));
+    return Codec2Client::CreateFromService(
+            kClientNames[index],
+            (serviceMask & (1 << index)) != 0);
 }
 
 ClientList getClientList() {
@@ -633,9 +637,13 @@
             Base::tryGetService(instanceName);
     if (!baseStore) {
         if (waitForService) {
-            ALOGE("Codec2.0 service inaccessible. Check the device manifest.");
+            ALOGW("Codec2.0 service \"%s\" inaccessible. "
+                  "Check the device manifest.",
+                  instanceName);
         } else {
-            ALOGW("Codec2.0 service not available right now. Try again later.");
+            ALOGD("Codec2.0 service \"%s\" unavailable right now. "
+                  "Try again later.",
+                  instanceName);
         }
         return nullptr;
     }
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index 3ab3967..c48bf0c 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -85,9 +85,9 @@
 namespace hardware {
 namespace media {
 namespace bufferpool {
-namespace V1_0 {
+namespace V2_0 {
 struct IClientManager;
-} // namespace V1_0
+} // namespace V2_0
 } // namespace bufferpool
 } // namespace media
 } // namespace hardware
@@ -237,7 +237,7 @@
     mutable std::vector<std::unique_ptr<std::vector<std::string>>>
             mAliasesBuffer;
 
-    sp<::android::hardware::media::bufferpool::V1_0::IClientManager>
+    sp<::android::hardware::media::bufferpool::V2_0::IClientManager>
             mHostPoolManager;
 };
 
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index 965971e..216525e 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -37,58 +37,3 @@
     compile_multilib: "32",
 }
 
-cc_library_shared {
-    name: "libcodec2_serviceregistrant",
-    // need vendor version for update packaging, system version may have more dependencies
-    vendor_available: true,
-    srcs: [
-        "C2SoftwareCodecServiceRegistrant.cpp",
-    ],
-
-    header_libs: [
-        "libmedia_headers",
-    ],
-
-    shared_libs: [
-        "android.hardware.media.c2@1.0",
-        "libcodec2_hidl@1.0",
-        "libcodec2_vndk",
-        "liblog",
-        "libutils",
-    ],
-
-    // Codecs
-    runtime_libs: [
-        "libcodec2_soft_avcdec",
-        "libcodec2_soft_avcenc",
-        "libcodec2_soft_aacdec",
-        "libcodec2_soft_aacenc",
-        "libcodec2_soft_amrnbdec",
-        "libcodec2_soft_amrnbenc",
-        "libcodec2_soft_amrwbdec",
-        "libcodec2_soft_amrwbenc",
-        "libcodec2_soft_hevcdec",
-        "libcodec2_soft_g711alawdec",
-        "libcodec2_soft_g711mlawdec",
-        "libcodec2_soft_mpeg2dec",
-        "libcodec2_soft_h263dec",
-        "libcodec2_soft_h263enc",
-        "libcodec2_soft_mpeg4dec",
-        "libcodec2_soft_mpeg4enc",
-        "libcodec2_soft_mp3dec",
-        "libcodec2_soft_vorbisdec",
-        "libcodec2_soft_opusdec",
-        "libcodec2_soft_vp8dec",
-        "libcodec2_soft_vp9dec",
-        "libcodec2_soft_vp8enc",
-        "libcodec2_soft_vp9enc",
-        "libcodec2_soft_rawdec",
-        "libcodec2_soft_flacdec",
-        "libcodec2_soft_flacenc",
-        "libcodec2_soft_gsmdec",
-        "libcodec2_soft_xaacdec",
-    ],
-
-    compile_multilib: "32",
-}
-
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index f5578726..2870d39 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -54,8 +54,5 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index f903bbb..852d6d6 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1348,9 +1348,7 @@
 }
 
 void CCodec::signalSetParameters(const sp<AMessage> &params) {
-    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
-    msg->setMessage("params", params);
-    msg->post();
+    setParameters(params);
 }
 
 void CCodec::setParameters(const sp<AMessage> &params) {
@@ -1515,13 +1513,6 @@
             setInputSurface(surface);
             break;
         }
-        case kWhatSetParameters: {
-            setDeadline(now, 50ms, "setParameters");
-            sp<AMessage> params;
-            CHECK(msg->findMessage("params", &params));
-            setParameters(params);
-            break;
-        }
         case kWhatWorkDone: {
             std::unique_ptr<C2Work> work;
             size_t numDiscardedInputBuffers;
@@ -1594,6 +1585,7 @@
                     C2StreamColorAspectsInfo::output::PARAM_TYPE,
                     C2StreamDataSpaceInfo::output::PARAM_TYPE,
                     C2StreamHdrStaticInfo::output::PARAM_TYPE,
+                    C2StreamHdr10PlusInfo::output::PARAM_TYPE,
                     C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
                     C2StreamSurfaceScalingInfo::output::PARAM_TYPE
                 };
@@ -1677,7 +1669,7 @@
         deadline->set(std::chrono::steady_clock::now() + 3s, "eos");
     }
     // TODO: query and use input/pipeline/output delay combined
-    if (count >= 8) {
+    if (count >= 4) {
         CCodecWatchdog::getInstance()->watch(this);
         Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
         deadline->set(std::chrono::steady_clock::now() + 3s, "queue");
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 01b9c1e..55a97d8 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -787,8 +787,13 @@
     std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(
             size_t size) final {
         int32_t capacity = kLinearBufferSize;
-        (void)mFormat->findInt32(C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING, &capacity);
-
+        (void)mFormat->findInt32(KEY_MAX_INPUT_SIZE, &capacity);
+        if ((size_t)capacity > kMaxLinearBufferSize) {
+            ALOGD("client requested %d, capped to %zu", capacity, kMaxLinearBufferSize);
+            capacity = kMaxLinearBufferSize;
+        }
+        // TODO: proper max input size
+        // TODO: read usage from intf
         std::unique_ptr<InputBuffersArray> array(
                 new InputBuffersArray(mComponentName.c_str(), "1D-Input[N]"));
         array->setPool(mPool);
@@ -1807,17 +1812,29 @@
 
 status_t CCodecBufferChannel::renderOutputBuffer(
         const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+    ALOGV("[%s] renderOutputBuffer: %p", mName, buffer.get());
     std::shared_ptr<C2Buffer> c2Buffer;
+    bool released = false;
     {
         Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
         if (*buffers) {
-            (*buffers)->releaseBuffer(buffer, &c2Buffer);
+            released = (*buffers)->releaseBuffer(buffer, &c2Buffer);
         }
     }
+    // NOTE: some apps try to releaseOutputBuffer() with timestamp and/or render
+    //       set to true.
+    sendOutputBuffers();
+    // input buffer feeding may have been gated by pending output buffers
+    feedInputBufferIfAvailable();
     if (!c2Buffer) {
+        if (released) {
+            ALOGD("[%s] The app is calling releaseOutputBuffer() with "
+                  "timestamp or render=true with non-video buffers. Apps should "
+                  "call releaseOutputBuffer() with render=false for those.",
+                  mName);
+        }
         return INVALID_OPERATION;
     }
-    sendOutputBuffers();
 
 #if 0
     const std::vector<std::shared_ptr<const C2Info>> infoParams = c2Buffer->info();
@@ -1871,6 +1888,11 @@
         std::static_pointer_cast<const C2StreamHdrStaticInfo::output>(
                 c2Buffer->getInfo(C2StreamHdrStaticInfo::output::PARAM_TYPE));
 
+    // HDR10 plus info
+    std::shared_ptr<const C2StreamHdr10PlusInfo::output> hdr10PlusInfo =
+        std::static_pointer_cast<const C2StreamHdr10PlusInfo::output>(
+                c2Buffer->getInfo(C2StreamHdr10PlusInfo::output::PARAM_TYPE));
+
     {
         Mutexed<OutputSurface>::Locked output(mOutputSurface);
         if (output->surface == nullptr) {
@@ -1898,35 +1920,45 @@
             videoScalingMode,
             transform,
             Fence::NO_FENCE, 0);
-    if (hdrStaticInfo) {
-        struct android_smpte2086_metadata smpte2086_meta = {
-            .displayPrimaryRed = {
-                hdrStaticInfo->mastering.red.x, hdrStaticInfo->mastering.red.y
-            },
-            .displayPrimaryGreen = {
-                hdrStaticInfo->mastering.green.x, hdrStaticInfo->mastering.green.y
-            },
-            .displayPrimaryBlue = {
-                hdrStaticInfo->mastering.blue.x, hdrStaticInfo->mastering.blue.y
-            },
-            .whitePoint = {
-                hdrStaticInfo->mastering.white.x, hdrStaticInfo->mastering.white.y
-            },
-            .maxLuminance = hdrStaticInfo->mastering.maxLuminance,
-            .minLuminance = hdrStaticInfo->mastering.minLuminance,
-        };
-
-        struct android_cta861_3_metadata cta861_meta = {
-            .maxContentLightLevel = hdrStaticInfo->maxCll,
-            .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
-        };
-
+    if (hdrStaticInfo || hdr10PlusInfo) {
         HdrMetadata hdr;
-        hdr.validTypes = HdrMetadata::SMPTE2086 | HdrMetadata::CTA861_3;
-        hdr.smpte2086 = smpte2086_meta;
-        hdr.cta8613 = cta861_meta;
+        if (hdrStaticInfo) {
+            struct android_smpte2086_metadata smpte2086_meta = {
+                .displayPrimaryRed = {
+                    hdrStaticInfo->mastering.red.x, hdrStaticInfo->mastering.red.y
+                },
+                .displayPrimaryGreen = {
+                    hdrStaticInfo->mastering.green.x, hdrStaticInfo->mastering.green.y
+                },
+                .displayPrimaryBlue = {
+                    hdrStaticInfo->mastering.blue.x, hdrStaticInfo->mastering.blue.y
+                },
+                .whitePoint = {
+                    hdrStaticInfo->mastering.white.x, hdrStaticInfo->mastering.white.y
+                },
+                .maxLuminance = hdrStaticInfo->mastering.maxLuminance,
+                .minLuminance = hdrStaticInfo->mastering.minLuminance,
+            };
+
+            struct android_cta861_3_metadata cta861_meta = {
+                .maxContentLightLevel = hdrStaticInfo->maxCll,
+                .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
+            };
+
+            hdr.validTypes = HdrMetadata::SMPTE2086 | HdrMetadata::CTA861_3;
+            hdr.smpte2086 = smpte2086_meta;
+            hdr.cta8613 = cta861_meta;
+        }
+        if (hdr10PlusInfo) {
+            hdr.validTypes |= HdrMetadata::HDR10PLUS;
+            hdr.hdr10plus.assign(
+                    hdr10PlusInfo->m.value,
+                    hdr10PlusInfo->m.value + hdr10PlusInfo->flexCount());
+        }
         qbi.setHdrMetadata(hdr);
     }
+    // we don't have dirty regions
+    qbi.setSurfaceDamage(Region::INVALID_REGION);
     android::IGraphicBufferProducer::QueueBufferOutput qbo;
     status_t result = mComponent->queueToOutputSurface(block, qbi, &qbo);
     if (result != OK) {
@@ -1961,8 +1993,8 @@
         }
     }
     if (released) {
-        feedInputBufferIfAvailable();
         sendOutputBuffers();
+        feedInputBufferIfAvailable();
     } else {
         ALOGD("[%s] MediaCodec discarded an unknown buffer", mName);
     }
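For reference, the renderOutputBuffer hunks above now fold two optional sources into one HdrMetadata before the buffer is queued to the surface: the static SMPTE 2086 / CTA-861.3 values and the new HDR10+ dynamic-metadata blob. A minimal standalone sketch of that merge, using a simplified stand-in struct rather than the real libui type:

    #include <cstdint>
    #include <vector>

    // Simplified stand-in for the HdrMetadata used above; only the pieces the hunk touches are modeled.
    struct HdrSketch {
        enum : uint32_t { SMPTE2086 = 1, CTA861_3 = 2, HDR10PLUS = 4 };
        uint32_t validTypes = 0;
        std::vector<uint8_t> hdr10plus;   // raw HDR10+ dynamic-metadata payload
    };

    // Either source may be absent; HDR10+ metadata can also coexist with the static
    // values, which is why the type flags are OR-ed together rather than overwritten.
    HdrSketch mergeHdr(bool haveStaticInfo, const uint8_t *plusData, size_t plusSize) {
        HdrSketch hdr;
        if (haveStaticInfo) {
            hdr.validTypes = HdrSketch::SMPTE2086 | HdrSketch::CTA861_3;
            // mastering display primaries and max/min luminance would be filled in here
        }
        if (plusData != nullptr && plusSize > 0) {
            hdr.validTypes |= HdrSketch::HDR10PLUS;
            hdr.hdr10plus.assign(plusData, plusData + plusSize);
        }
        return hdr;
    }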
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 8dbfd0e..ef02e74 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -570,6 +570,12 @@
     add(ConfigMapper("csd-0",           C2_PARAMKEY_INIT_DATA,       "value")
         .limitTo(D::OUTPUT & D::READ));
 
+    add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO, "value")
+        .limitTo(D::VIDEO & D::PARAM & D::INPUT));
+
+    add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO, "value")
+        .limitTo(D::VIDEO & D::OUTPUT));
+
     add(ConfigMapper(C2_PARAMKEY_TEMPORAL_LAYERING, C2_PARAMKEY_TEMPORAL_LAYERING, "")
         .limitTo(D::ENCODER & D::VIDEO & D::OUTPUT));
 
@@ -624,7 +630,23 @@
         .limitTo(D::AUDIO & D::CODED));
 
     add(ConfigMapper(KEY_PCM_ENCODING,  C2_PARAMKEY_PCM_ENCODING,       "value")
-        .limitTo(D::AUDIO));
+        .limitTo(D::AUDIO)
+        .withMappers([](C2Value v) -> C2Value {
+            int32_t value;
+            C2Config::pcm_encoding_t to;
+            if (v.get(&value) && C2Mapper::map(value, &to)) {
+                return to;
+            }
+            return C2Value();
+        }, [](C2Value v) -> C2Value {
+            C2Config::pcm_encoding_t value;
+            int32_t to;
+            using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
+            if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
+                return to;
+            }
+            return C2Value();
+        }));
 
     add(ConfigMapper(KEY_IS_ADTS, C2_PARAMKEY_AAC_PACKAGING, "value")
         .limitTo(D::AUDIO & D::CODED)
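The KEY_PCM_ENCODING hunk above registers a pair of lambdas with withMappers(): one converts the SDK integer to the Codec2 pcm_encoding_t, the other converts back when the format is read out. A minimal sketch of that two-way mapping, with a hypothetical lookup table standing in for C2Mapper::map (the enum values below are illustrative, not taken from C2Config):

    #include <cstdint>
    #include <optional>

    enum class PcmEncoding : uint32_t { PCM_16, PCM_8, PCM_FLOAT };   // illustrative

    // SDK constant -> framework enum; unmapped values are dropped, as in the hunk above.
    std::optional<PcmEncoding> sdkToC2(int32_t sdk) {
        switch (sdk) {
            case 2: return PcmEncoding::PCM_16;     // AudioFormat.ENCODING_PCM_16BIT
            case 3: return PcmEncoding::PCM_8;      // AudioFormat.ENCODING_PCM_8BIT
            case 4: return PcmEncoding::PCM_FLOAT;  // AudioFormat.ENCODING_PCM_FLOAT
            default: return std::nullopt;
        }
    }

    // framework enum -> SDK constant, the inverse direction.
    std::optional<int32_t> c2ToSdk(PcmEncoding enc) {
        switch (enc) {
            case PcmEncoding::PCM_16:    return 2;
            case PcmEncoding::PCM_8:     return 3;
            case PcmEncoding::PCM_FLOAT: return 4;
        }
        return std::nullopt;
    }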
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index bf6062e..1113ae8 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -109,9 +109,11 @@
 
 // DummyContainerBuffer
 
+static uint8_t sDummyByte[1] = { 0 };
+
 DummyContainerBuffer::DummyContainerBuffer(
         const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer)
-    : Codec2Buffer(format, new ABuffer(nullptr, 1)),
+    : Codec2Buffer(format, new ABuffer(sDummyByte, 1)),
       mBufferRef(buffer) {
     setRange(0, buffer ? 1 : 0);
 }
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index bd4983c..eb6c3e9 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -32,8 +32,5 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index b7519da..84d22a3 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -88,16 +88,30 @@
 
         uint32_t planeW = img->mWidth / plane.colSampling;
         uint32_t planeH = img->mHeight / plane.rowSampling;
-        for (uint32_t row = 0; row < planeH; ++row) {
-            decltype(imgRow) imgPtr = imgRow;
-            decltype(viewRow) viewPtr = viewRow;
-            for (uint32_t col = 0; col < planeW; ++col) {
-                MemCopier<ToMediaImage, 0>::copy(imgPtr, viewPtr, bpp);
-                imgPtr += img->mPlane[i].mColInc;
-                viewPtr += plane.colInc;
+
+        bool canCopyByRow = (plane.colInc == 1) && (img->mPlane[i].mColInc == 1);
+        bool canCopyByPlane = canCopyByRow && (plane.rowInc == img->mPlane[i].mRowInc);
+        if (canCopyByPlane) {
+            MemCopier<ToMediaImage, 0>::copy(imgRow, viewRow, plane.rowInc * planeH);
+        } else if (canCopyByRow) {
+            for (uint32_t row = 0; row < planeH; ++row) {
+                MemCopier<ToMediaImage, 0>::copy(
+                        imgRow, viewRow, std::min(plane.rowInc, img->mPlane[i].mRowInc));
+                imgRow += img->mPlane[i].mRowInc;
+                viewRow += plane.rowInc;
             }
-            imgRow += img->mPlane[i].mRowInc;
-            viewRow += plane.rowInc;
+        } else {
+            for (uint32_t row = 0; row < planeH; ++row) {
+                decltype(imgRow) imgPtr = imgRow;
+                decltype(viewRow) viewPtr = viewRow;
+                for (uint32_t col = 0; col < planeW; ++col) {
+                    MemCopier<ToMediaImage, 0>::copy(imgPtr, viewPtr, bpp);
+                    imgPtr += img->mPlane[i].mColInc;
+                    viewPtr += plane.colInc;
+                }
+                imgRow += img->mPlane[i].mRowInc;
+                viewRow += plane.rowInc;
+            }
         }
     }
     return OK;
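The copy hunk above picks the cheapest of three strategies based on the plane layouts: a single memcpy for the whole plane when the rows are contiguous, one memcpy per row when only the column increments are 1, and the original per-sample loop otherwise. A standalone sketch of that decision, with a generic plane description instead of the C2 and MediaImage types (positive increments assumed):

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    struct PlaneLayout {        // simplified stand-in for the two layouts in the hunk
        int32_t colInc;         // bytes between samples within a row
        int32_t rowInc;         // bytes between rows
    };

    void copyPlane(uint8_t *dst, PlaneLayout d, const uint8_t *src, PlaneLayout s,
                   uint32_t width, uint32_t height, uint32_t bpp) {
        const bool byRow   = (d.colInc == 1) && (s.colInc == 1);
        const bool byPlane = byRow && (d.rowInc == s.rowInc);
        if (byPlane) {
            // identical, contiguous layout: one copy for the whole plane
            std::memcpy(dst, src, size_t(d.rowInc) * height);
        } else if (byRow) {
            // packed rows but different strides: one copy per row
            for (uint32_t row = 0; row < height; ++row) {
                std::memcpy(dst, src, size_t(std::min(d.rowInc, s.rowInc)));
                dst += d.rowInc;
                src += s.rowInc;
            }
        } else {
            // fully general fallback: copy sample by sample
            for (uint32_t row = 0; row < height; ++row) {
                for (uint32_t col = 0; col < width; ++col) {
                    std::memcpy(dst + col * d.colInc, src + col * s.colInc, bpp);
                }
                dst += d.rowInc;
                src += s.rowInc;
            }
        }
    }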
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index 0eb90be..e0b1355 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -35,7 +35,7 @@
 
     export_shared_lib_headers: [
         "libbase",
-        "android.hardware.media.bufferpool@1.0",
+        "android.hardware.media.bufferpool@2.0",
     ],
 
     local_include_dirs: [
@@ -51,7 +51,7 @@
         "android.hardware.graphics.allocator@2.0",
         "android.hardware.graphics.bufferqueue@1.0",
         "android.hardware.graphics.mapper@2.0",
-        "android.hardware.media.bufferpool@1.0",
+        "android.hardware.media.bufferpool@2.0",
         "libbase",
         "libbinder",
         "libcutils",
@@ -63,7 +63,7 @@
         "liblog",
         "libstagefright_bufferqueue_helper",
         "libstagefright_foundation",
-        "libstagefright_bufferpool@1.0",
+        "libstagefright_bufferpool@2.0",
         "libui",
         "libutils",
     ],
diff --git a/media/codec2/vndk/C2Buffer.cpp b/media/codec2/vndk/C2Buffer.cpp
index 47366ca..710b536 100644
--- a/media/codec2/vndk/C2Buffer.cpp
+++ b/media/codec2/vndk/C2Buffer.cpp
@@ -33,12 +33,12 @@
 using android::C2AllocatorGralloc;
 using android::C2AllocatorIon;
 using android::hardware::media::bufferpool::BufferPoolData;
-using android::hardware::media::bufferpool::V1_0::ResultStatus;
-using android::hardware::media::bufferpool::V1_0::implementation::BufferPoolAllocation;
-using android::hardware::media::bufferpool::V1_0::implementation::BufferPoolAllocator;
-using android::hardware::media::bufferpool::V1_0::implementation::ClientManager;
-using android::hardware::media::bufferpool::V1_0::implementation::ConnectionId;
-using android::hardware::media::bufferpool::V1_0::implementation::INVALID_CONNECTIONID;
+using android::hardware::media::bufferpool::V2_0::ResultStatus;
+using android::hardware::media::bufferpool::V2_0::implementation::BufferPoolAllocation;
+using android::hardware::media::bufferpool::V2_0::implementation::BufferPoolAllocator;
+using android::hardware::media::bufferpool::V2_0::implementation::ClientManager;
+using android::hardware::media::bufferpool::V2_0::implementation::ConnectionId;
+using android::hardware::media::bufferpool::V2_0::implementation::INVALID_CONNECTIONID;
 
 // This anonymous namespace contains the helper classes that allow our implementation to create
 // block/buffer objects.
diff --git a/media/codec2/vndk/C2PlatformStorePluginLoader.cpp b/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
index 7a460f4..4c330e5 100644
--- a/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
+++ b/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
@@ -26,6 +26,12 @@
 /* static */ android::Mutex C2PlatformStorePluginLoader::sMutex;
 /* static */ std::unique_ptr<C2PlatformStorePluginLoader> C2PlatformStorePluginLoader::sInstance;
 
+namespace /* unnamed */ {
+
+constexpr const char kStorePluginPath[] = "libc2plugin_store.so";
+
+}  // unnamed
+
 C2PlatformStorePluginLoader::C2PlatformStorePluginLoader(const char *libPath)
     : mCreateBlockPool(nullptr) {
     mLibHandle = dlopen(libPath, RTLD_NOW | RTLD_NODELETE);
@@ -89,7 +95,7 @@
     android::Mutex::Autolock _l(sMutex);
     if (!sInstance) {
         ALOGV("Loading library");
-        sInstance.reset(new C2PlatformStorePluginLoader("libstagefright_ccodec_ext.so"));
+        sInstance.reset(new C2PlatformStorePluginLoader(kStorePluginPath));
     }
     return sInstance;
 }
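The loader change above only moves the dlopen target into the named constant kStorePluginPath (libc2plugin_store.so). For context, the surrounding class resolves its entry point with dlopen/dlsym roughly as sketched below; the CreateBlockPoolFunc signature and the "CreateBlockPool" symbol name are illustrative assumptions, not taken from the header:

    #include <dlfcn.h>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical signature for the factory the plugin is expected to export.
    typedef int (*CreateBlockPoolFunc)(int32_t allocatorId, int64_t poolId, void **pool);

    CreateBlockPoolFunc loadCreateBlockPool(const char *libPath) {
        // RTLD_NODELETE keeps the library mapped even if the handle is later closed.
        void *handle = dlopen(libPath, RTLD_NOW | RTLD_NODELETE);
        if (handle == nullptr) {
            std::fprintf(stderr, "dlopen(%s) failed: %s\n", libPath, dlerror());
            return nullptr;
        }
        auto fn = reinterpret_cast<CreateBlockPoolFunc>(dlsym(handle, "CreateBlockPool"));
        if (fn == nullptr) {
            std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        }
        return fn;
    }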
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index 33019ed..2d4e19e 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -798,65 +798,65 @@
         mComponentsList.emplace_back(alias);
     };
     // TODO: move this also into a .so so it can be updated
-    emplace("c2.android.avc.decoder", "libstagefright_soft_c2avcdec.so");
-    emplace("c2.android.avc.encoder", "libstagefright_soft_c2avcenc.so");
-    emplace("c2.android.aac.decoder", "libstagefright_soft_c2aacdec.so");
-    emplace("c2.android.aac.encoder", "libstagefright_soft_c2aacenc.so");
-    emplace("c2.android.amrnb.decoder", "libstagefright_soft_c2amrnbdec.so");
-    emplace("c2.android.amrnb.encoder", "libstagefright_soft_c2amrnbenc.so");
-    emplace("c2.android.amrwb.decoder", "libstagefright_soft_c2amrwbdec.so");
-    emplace("c2.android.amrwb.encoder", "libstagefright_soft_c2amrwbenc.so");
-    emplace("c2.android.hevc.decoder", "libstagefright_soft_c2hevcdec.so");
-    emplace("c2.android.g711.alaw.decoder", "libstagefright_soft_c2g711alawdec.so");
-    emplace("c2.android.g711.mlaw.decoder", "libstagefright_soft_c2g711mlawdec.so");
-    emplace("c2.android.mpeg2.decoder", "libstagefright_soft_c2mpeg2dec.so");
-    emplace("c2.android.h263.decoder", "libstagefright_soft_c2h263dec.so");
-    emplace("c2.android.h263.encoder", "libstagefright_soft_c2h263enc.so");
-    emplace("c2.android.mpeg4.decoder", "libstagefright_soft_c2mpeg4dec.so");
-    emplace("c2.android.mpeg4.encoder", "libstagefright_soft_c2mpeg4enc.so");
-    emplace("c2.android.mp3.decoder", "libstagefright_soft_c2mp3dec.so");
-    emplace("c2.android.vorbis.decoder", "libstagefright_soft_c2vorbisdec.so");
-    emplace("c2.android.opus.decoder", "libstagefright_soft_c2opusdec.so");
-    emplace("c2.android.vp8.decoder", "libstagefright_soft_c2vp8dec.so");
-    emplace("c2.android.vp9.decoder", "libstagefright_soft_c2vp9dec.so");
-    emplace("c2.android.vp8.encoder", "libstagefright_soft_c2vp8enc.so");
-    emplace("c2.android.vp9.encoder", "libstagefright_soft_c2vp9enc.so");
-    emplace("c2.android.raw.decoder", "libstagefright_soft_c2rawdec.so");
-    emplace("c2.android.flac.decoder", "libstagefright_soft_c2flacdec.so");
-    emplace("c2.android.flac.encoder", "libstagefright_soft_c2flacenc.so");
-    emplace("c2.android.gsm.decoder", "libstagefright_soft_c2gsmdec.so");
-    emplace("c2.android.xaac.decoder", "libstagefright_soft_c2xaacdec.so");
+    emplace("c2.android.avc.decoder", "libcodec2_soft_avcdec.so");
+    emplace("c2.android.avc.encoder", "libcodec2_soft_avcenc.so");
+    emplace("c2.android.aac.decoder", "libcodec2_soft_aacdec.so");
+    emplace("c2.android.aac.encoder", "libcodec2_soft_aacenc.so");
+    emplace("c2.android.amrnb.decoder", "libcodec2_soft_amrnbdec.so");
+    emplace("c2.android.amrnb.encoder", "libcodec2_soft_amrnbenc.so");
+    emplace("c2.android.amrwb.decoder", "libcodec2_soft_amrwbdec.so");
+    emplace("c2.android.amrwb.encoder", "libcodec2_soft_amrwbenc.so");
+    emplace("c2.android.hevc.decoder", "libcodec2_soft_hevcdec.so");
+    emplace("c2.android.g711.alaw.decoder", "libcodec2_soft_g711alawdec.so");
+    emplace("c2.android.g711.mlaw.decoder", "libcodec2_soft_g711mlawdec.so");
+    emplace("c2.android.mpeg2.decoder", "libcodec2_soft_mpeg2dec.so");
+    emplace("c2.android.h263.decoder", "libcodec2_soft_h263dec.so");
+    emplace("c2.android.h263.encoder", "libcodec2_soft_h263enc.so");
+    emplace("c2.android.mpeg4.decoder", "libcodec2_soft_mpeg4dec.so");
+    emplace("c2.android.mpeg4.encoder", "libcodec2_soft_mpeg4enc.so");
+    emplace("c2.android.mp3.decoder", "libcodec2_soft_mp3dec.so");
+    emplace("c2.android.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
+    emplace("c2.android.opus.decoder", "libcodec2_soft_opusdec.so");
+    emplace("c2.android.vp8.decoder", "libcodec2_soft_vp8dec.so");
+    emplace("c2.android.vp9.decoder", "libcodec2_soft_vp9dec.so");
+    emplace("c2.android.vp8.encoder", "libcodec2_soft_vp8enc.so");
+    emplace("c2.android.vp9.encoder", "libcodec2_soft_vp9enc.so");
+    emplace("c2.android.raw.decoder", "libcodec2_soft_rawdec.so");
+    emplace("c2.android.flac.decoder", "libcodec2_soft_flacdec.so");
+    emplace("c2.android.flac.encoder", "libcodec2_soft_flacenc.so");
+    emplace("c2.android.gsm.decoder", "libcodec2_soft_gsmdec.so");
+    emplace("c2.android.xaac.decoder", "libcodec2_soft_xaacdec.so");
 
     // "Aliases"
     // TODO: use aliases proper from C2Component::Traits
-    emplace("OMX.google.h264.decoder", "libstagefright_soft_c2avcdec.so");
-    emplace("OMX.google.h264.encoder", "libstagefright_soft_c2avcenc.so");
-    emplace("OMX.google.aac.decoder", "libstagefright_soft_c2aacdec.so");
-    emplace("OMX.google.aac.encoder", "libstagefright_soft_c2aacenc.so");
-    emplace("OMX.google.amrnb.decoder", "libstagefright_soft_c2amrnbdec.so");
-    emplace("OMX.google.amrnb.encoder", "libstagefright_soft_c2amrnbenc.so");
-    emplace("OMX.google.amrwb.decoder", "libstagefright_soft_c2amrwbdec.so");
-    emplace("OMX.google.amrwb.encoder", "libstagefright_soft_c2amrwbenc.so");
-    emplace("OMX.google.hevc.decoder", "libstagefright_soft_c2hevcdec.so");
-    emplace("OMX.google.g711.alaw.decoder", "libstagefright_soft_c2g711alawdec.so");
-    emplace("OMX.google.g711.mlaw.decoder", "libstagefright_soft_c2g711mlawdec.so");
-    emplace("OMX.google.mpeg2.decoder", "libstagefright_soft_c2mpeg2dec.so");
-    emplace("OMX.google.h263.decoder", "libstagefright_soft_c2h263dec.so");
-    emplace("OMX.google.h263.encoder", "libstagefright_soft_c2h263enc.so");
-    emplace("OMX.google.mpeg4.decoder", "libstagefright_soft_c2mpeg4dec.so");
-    emplace("OMX.google.mpeg4.encoder", "libstagefright_soft_c2mpeg4enc.so");
-    emplace("OMX.google.mp3.decoder", "libstagefright_soft_c2mp3dec.so");
-    emplace("OMX.google.vorbis.decoder", "libstagefright_soft_c2vorbisdec.so");
-    emplace("OMX.google.opus.decoder", "libstagefright_soft_c2opusdec.so");
-    emplace("OMX.google.vp8.decoder", "libstagefright_soft_c2vp8dec.so");
-    emplace("OMX.google.vp9.decoder", "libstagefright_soft_c2vp9dec.so");
-    emplace("OMX.google.vp8.encoder", "libstagefright_soft_c2vp8enc.so");
-    emplace("OMX.google.vp9.encoder", "libstagefright_soft_c2vp9enc.so");
-    emplace("OMX.google.raw.decoder", "libstagefright_soft_c2rawdec.so");
-    emplace("OMX.google.flac.decoder", "libstagefright_soft_c2flacdec.so");
-    emplace("OMX.google.flac.encoder", "libstagefright_soft_c2flacenc.so");
-    emplace("OMX.google.gsm.decoder", "libstagefright_soft_c2gsmdec.so");
-    emplace("OMX.google.xaac.decoder", "libstagefright_soft_c2xaacdec.so");
+    emplace("OMX.google.h264.decoder", "libcodec2_soft_avcdec.so");
+    emplace("OMX.google.h264.encoder", "libcodec2_soft_avcenc.so");
+    emplace("OMX.google.aac.decoder", "libcodec2_soft_aacdec.so");
+    emplace("OMX.google.aac.encoder", "libcodec2_soft_aacenc.so");
+    emplace("OMX.google.amrnb.decoder", "libcodec2_soft_amrnbdec.so");
+    emplace("OMX.google.amrnb.encoder", "libcodec2_soft_amrnbenc.so");
+    emplace("OMX.google.amrwb.decoder", "libcodec2_soft_amrwbdec.so");
+    emplace("OMX.google.amrwb.encoder", "libcodec2_soft_amrwbenc.so");
+    emplace("OMX.google.hevc.decoder", "libcodec2_soft_hevcdec.so");
+    emplace("OMX.google.g711.alaw.decoder", "libcodec2_soft_g711alawdec.so");
+    emplace("OMX.google.g711.mlaw.decoder", "libcodec2_soft_g711mlawdec.so");
+    emplace("OMX.google.mpeg2.decoder", "libcodec2_soft_mpeg2dec.so");
+    emplace("OMX.google.h263.decoder", "libcodec2_soft_h263dec.so");
+    emplace("OMX.google.h263.encoder", "libcodec2_soft_h263enc.so");
+    emplace("OMX.google.mpeg4.decoder", "libcodec2_soft_mpeg4dec.so");
+    emplace("OMX.google.mpeg4.encoder", "libcodec2_soft_mpeg4enc.so");
+    emplace("OMX.google.mp3.decoder", "libcodec2_soft_mp3dec.so");
+    emplace("OMX.google.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
+    emplace("OMX.google.opus.decoder", "libcodec2_soft_opusdec.so");
+    emplace("OMX.google.vp8.decoder", "libcodec2_soft_vp8dec.so");
+    emplace("OMX.google.vp9.decoder", "libcodec2_soft_vp9dec.so");
+    emplace("OMX.google.vp8.encoder", "libcodec2_soft_vp8enc.so");
+    emplace("OMX.google.vp9.encoder", "libcodec2_soft_vp9enc.so");
+    emplace("OMX.google.raw.decoder", "libcodec2_soft_rawdec.so");
+    emplace("OMX.google.flac.decoder", "libcodec2_soft_flacdec.so");
+    emplace("OMX.google.flac.encoder", "libcodec2_soft_flacenc.so");
+    emplace("OMX.google.gsm.decoder", "libcodec2_soft_gsmdec.so");
+    emplace("OMX.google.xaac.decoder", "libcodec2_soft_xaacdec.so");
 }
 
 c2_status_t C2PlatformComponentStore::copyBuffer(
diff --git a/media/codec2/vndk/include/C2BufferPriv.h b/media/codec2/vndk/include/C2BufferPriv.h
index d0b9152..be5f69c 100644
--- a/media/codec2/vndk/include/C2BufferPriv.h
+++ b/media/codec2/vndk/include/C2BufferPriv.h
@@ -20,7 +20,7 @@
 #include <functional>
 
 #include <C2Buffer.h>
-#include <android/hardware/media/bufferpool/1.0/IAccessor.h>
+#include <android/hardware/media/bufferpool/2.0/IAccessor.h>
 
 class C2BasicLinearBlockPool : public C2BlockPool {
 public:
@@ -112,7 +112,7 @@
      * \return true             IAccessor is written successfully.
      * \return false            IAccessor is not written.
      */
-    bool getAccessor(android::sp<android::hardware::media::bufferpool::V1_0::IAccessor> *accessor);
+    bool getAccessor(android::sp<android::hardware::media::bufferpool::V2_0::IAccessor> *accessor);
 
 private:
     const std::shared_ptr<C2Allocator> mAllocator;
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index f278107..9384ebf 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -31,7 +31,7 @@
 
 namespace android {
 
-class AACSource : public MediaTrackHelperV2 {
+class AACSource : public MediaTrackHelperV3 {
 public:
     AACSource(
             DataSourceHelper *source,
@@ -45,7 +45,7 @@
     virtual media_status_t getFormat(AMediaFormat*);
 
     virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~AACSource();
@@ -58,7 +58,6 @@
     off64_t mOffset;
     int64_t mCurrentTimeUs;
     bool mStarted;
-    MediaBufferGroup *mGroup;
 
     Vector<uint64_t> mOffsetVector;
     int64_t mFrameDurationUs;
@@ -196,7 +195,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV2 *AACExtractor::getTrack(size_t index) {
+MediaTrackHelperV3 *AACExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -227,7 +226,6 @@
       mOffset(0),
       mCurrentTimeUs(0),
       mStarted(false),
-      mGroup(NULL),
       mOffsetVector(offset_vector),
       mFrameDurationUs(frame_duration_us) {
 }
@@ -248,8 +246,7 @@
     }
 
     mCurrentTimeUs = 0;
-    mGroup = new MediaBufferGroup;
-    mGroup->add_buffer(MediaBufferBase::Create(kMaxFrameSize));
+    mBufferGroup->add_buffer(kMaxFrameSize);
     mStarted = true;
 
     return AMEDIA_OK;
@@ -258,9 +255,6 @@
 media_status_t AACSource::stop() {
     CHECK(mStarted);
 
-    delete mGroup;
-    mGroup = NULL;
-
     mStarted = false;
     return AMEDIA_OK;
 }
@@ -270,7 +264,7 @@
 }
 
 media_status_t AACSource::read(
-        MediaBufferBase **out, const ReadOptions *options) {
+        MediaBufferHelperV3 **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -293,8 +287,8 @@
         return AMEDIA_ERROR_END_OF_STREAM;
     }
 
-    MediaBufferBase *buffer;
-    status_t err = mGroup->acquire_buffer(&buffer);
+    MediaBufferHelperV3 *buffer;
+    status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return AMEDIA_ERROR_UNKNOWN;
     }
@@ -309,8 +303,9 @@
     }
 
     buffer->set_range(0, frameSizeWithoutHeader);
-    buffer->meta_data().setInt64(kKeyTime, mCurrentTimeUs);
-    buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+    AMediaFormat *meta = buffer->meta_data();
+    AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, mCurrentTimeUs);
+    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
 
     mOffset += frameSize;
     mCurrentTimeUs += mFrameDurationUs;
@@ -321,14 +316,14 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-static CMediaExtractorV2* CreateExtractor(
+static CMediaExtractorV3* CreateExtractor(
         CDataSource *source,
         void *meta) {
     off64_t offset = *static_cast<off64_t*>(meta);
-    return wrapV2(new AACExtractor(new DataSourceHelper(source), offset));
+    return wrapV3(new AACExtractor(new DataSourceHelper(source), offset));
 }
 
-static CreatorFuncV2 Sniff(
+static CreatorFuncV3 Sniff(
         CDataSource *source, float *confidence, void **meta,
         FreeMetaFunc *freeMeta) {
     off64_t pos = 0;
@@ -388,11 +383,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT,
+        EXTRACTORDEF_VERSION_CURRENT + 1,
         UUID("4fd80eae-03d2-4d72-9eb9-48fa6bb54613"),
         1, // version
         "AAC Extractor",
-        { .v2 = Sniff }
+        { .v3 = Sniff }
     };
 }
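The AAC changes above, and the matching AMR, FLAC, MIDI and MP3 changes later in this patch, all follow the same V2-to-V3 migration: the track no longer owns a MediaBufferGroup, it sizes the helper-provided mBufferGroup in start() instead, and per-buffer metadata is written through AMediaFormat keys rather than MetaData. A condensed sketch of the migrated read() shape, assuming the V3 plugin-helper header that declares MediaBufferHelperV3 / MediaBufferGroupHelperV3 and the OK constant (the exact header names are not shown in this patch):

    #include <media/NdkMediaFormat.h>

    media_status_t readOneFrame(MediaBufferGroupHelperV3 *bufferGroup,
                                MediaBufferHelperV3 **out,
                                int64_t timeUs, size_t frameSize) {
        *out = nullptr;
        MediaBufferHelperV3 *buffer = nullptr;
        if (bufferGroup->acquire_buffer(&buffer) != OK) {   // group sized via add_buffer() in start()
            return AMEDIA_ERROR_UNKNOWN;
        }
        // ... copy one frame of frameSize bytes into buffer->data() ...
        buffer->set_range(0, frameSize);
        AMediaFormat *meta = buffer->meta_data();            // AMediaFormat replaces MetaData
        AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
        AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
        *out = buffer;
        return AMEDIA_OK;
    }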
 
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/AACExtractor.h
index 3b11f95..be33bf5 100644
--- a/media/extractors/aac/AACExtractor.h
+++ b/media/extractors/aac/AACExtractor.h
@@ -29,12 +29,12 @@
 struct AMessage;
 class String8;
 
-class AACExtractor : public MediaExtractorPluginHelperV2 {
+class AACExtractor : public MediaExtractorPluginHelperV3 {
 public:
     AACExtractor(DataSourceHelper *source, off64_t offset);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV2 *getTrack(size_t index);
+    virtual MediaTrackHelperV3 *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 42b0a64..a58167a 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -8,7 +8,6 @@
 
     shared_libs: [
         "liblog",
-        "libmediaextractor",
         "libmediandk",
     ],
 
@@ -36,9 +35,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index 511b14d..7fd2a41 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -29,7 +29,7 @@
 
 namespace android {
 
-class AMRSource : public MediaTrackHelperV2 {
+class AMRSource : public MediaTrackHelperV3 {
 public:
     AMRSource(
             DataSourceHelper *source,
@@ -44,7 +44,7 @@
     virtual media_status_t getFormat(AMediaFormat *);
 
     virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~AMRSource();
@@ -209,7 +209,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV2 *AMRExtractor::getTrack(size_t index) {
+MediaTrackHelperV3 *AMRExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -255,8 +255,7 @@
 
     mOffset = mIsWide ? 9 : 6;
     mCurrentTimeUs = 0;
-    mGroup = new MediaBufferGroup;
-    mGroup->add_buffer(MediaBufferBase::Create(128));
+    mBufferGroup->add_buffer(128);
     mStarted = true;
 
     return AMEDIA_OK;
@@ -265,9 +264,6 @@
 media_status_t AMRSource::stop() {
     CHECK(mStarted);
 
-    delete mGroup;
-    mGroup = NULL;
-
     mStarted = false;
     return AMEDIA_OK;
 }
@@ -277,7 +273,7 @@
 }
 
 media_status_t AMRSource::read(
-        MediaBufferBase **out, const ReadOptions *options) {
+        MediaBufferHelperV3 **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -326,8 +322,8 @@
         return AMEDIA_ERROR_MALFORMED;
     }
 
-    MediaBufferBase *buffer;
-    status_t err = mGroup->acquire_buffer(&buffer);
+    MediaBufferHelperV3 *buffer;
+    status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return AMEDIA_ERROR_UNKNOWN;
     }
@@ -348,8 +344,9 @@
     }
 
     buffer->set_range(0, frameSize);
-    buffer->meta_data().setInt64(kKeyTime, mCurrentTimeUs);
-    buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+    AMediaFormat *meta = buffer->meta_data();
+    AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, mCurrentTimeUs);
+    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
 
     mOffset += frameSize;
     mCurrentTimeUs += 20000;  // Each frame is 20ms
@@ -366,22 +363,22 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT,
+        EXTRACTORDEF_VERSION_CURRENT + 1,
         UUID("c86639c9-2f31-40ac-a715-fa01b4493aaf"),
         1,
         "AMR Extractor",
         {
-           .v2 = [](
+           .v3 = [](
                     CDataSource *source,
                     float *confidence,
                     void **,
-                    FreeMetaFunc *) -> CreatorFuncV2 {
+                    FreeMetaFunc *) -> CreatorFuncV3 {
                 DataSourceHelper helper(source);
                 if (SniffAMR(&helper, nullptr, confidence)) {
                     return [](
                             CDataSource *source,
-                            void *) -> CMediaExtractorV2* {
-                        return wrapV2(new AMRExtractor(new DataSourceHelper(source)));};
+                            void *) -> CMediaExtractorV3* {
+                        return wrapV3(new AMRExtractor(new DataSourceHelper(source)));};
                 }
                 return NULL;
             }
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/AMRExtractor.h
index 44b2cbd..b50ce81 100644
--- a/media/extractors/amr/AMRExtractor.h
+++ b/media/extractors/amr/AMRExtractor.h
@@ -29,12 +29,12 @@
 class String8;
 #define OFFSET_TABLE_LEN    300
 
-class AMRExtractor : public MediaExtractorPluginHelperV2 {
+class AMRExtractor : public MediaExtractorPluginHelperV3 {
 public:
     explicit AMRExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV2 *getTrack(size_t index);
+    virtual MediaTrackHelperV3 *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index 83ea4c7..4bd933d 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -8,7 +8,6 @@
 
     shared_libs: [
         "liblog",
-        "libmediaextractor",
         "libmediandk",
     ],
 
@@ -34,9 +33,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 4d0a4cc..3a3d051 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -10,7 +10,6 @@
     shared_libs: [
         "libbinder_ndk",
         "liblog",
-        "libmediaextractor",
         "libmediandk",
     ],
 
@@ -40,9 +39,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index d54aaef..4e04605 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -36,7 +36,6 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MetaDataUtils.h>
-#include <media/stagefright/MediaBufferBase.h>
 #include <private/android_filesystem_config.h> // for AID_MEDIA
 #include <system/audio.h>
 
@@ -53,7 +52,7 @@
 
 class FLACParser;
 
-class FLACSource : public MediaTrackHelperV2 {
+class FLACSource : public MediaTrackHelperV3 {
 
 public:
     FLACSource(
@@ -66,7 +65,7 @@
     virtual media_status_t getFormat(AMediaFormat *meta);
 
     virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~FLACSource();
@@ -125,12 +124,12 @@
     }
 
     // media buffers
-    void allocateBuffers();
+    void allocateBuffers(MediaBufferGroupHelperV3 *group);
     void releaseBuffers();
-    MediaBufferBase *readBuffer() {
+    MediaBufferHelperV3 *readBuffer() {
         return readBuffer(false, 0LL);
     }
-    MediaBufferBase *readBuffer(FLAC__uint64 sample) {
+    MediaBufferHelperV3 *readBuffer(FLAC__uint64 sample) {
         return readBuffer(true, sample);
     }
 
@@ -143,7 +142,7 @@
 
     // media buffers
     size_t mMaxBufferSize;
-    MediaBufferGroup *mGroup;
+    MediaBufferGroupHelperV3 *mGroup;
     void (*mCopy)(int16_t *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
 
     // handle to underlying libFLAC parser
@@ -167,7 +166,7 @@
     FLAC__StreamDecoderErrorStatus mErrorStatus;
 
     status_t init();
-    MediaBufferBase *readBuffer(bool doSeek, FLAC__uint64 sample);
+    MediaBufferHelperV3 *readBuffer(bool doSeek, FLAC__uint64 sample);
 
     // no copy constructor or assignment
     FLACParser(const FLACParser &);
@@ -401,6 +400,7 @@
 
 // Copy samples from FLAC native 32-bit non-interleaved to 16-bit signed
 // or 32-bit float interleaved.
+// TODO: Consider moving to audio_utils.
 // These are candidates for optimization if needed.
 static void copyTo16Signed(
         short *dst,
@@ -408,10 +408,19 @@
         unsigned nSamples,
         unsigned nChannels,
         unsigned bitsPerSample) {
-    const unsigned leftShift = 16 - bitsPerSample;
-    for (unsigned i = 0; i < nSamples; ++i) {
-        for (unsigned c = 0; c < nChannels; ++c) {
-            *dst++ = src[c][i] << leftShift;
+    const int leftShift = 16 - (int)bitsPerSample; // cast to int to prevent unsigned overflow.
+    if (leftShift >= 0) {
+        for (unsigned i = 0; i < nSamples; ++i) {
+            for (unsigned c = 0; c < nChannels; ++c) {
+                *dst++ = src[c][i] << leftShift;
+            }
+        }
+    } else {
+        const int rightShift = -leftShift;
+        for (unsigned i = 0; i < nSamples; ++i) {
+            for (unsigned c = 0; c < nChannels; ++c) {
+                *dst++ = src[c][i] >> rightShift;
+            }
         }
     }
 }
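The copyTo16Signed change just above matters once bitsPerSample exceeds 16: with the old unsigned arithmetic, 16 - bitsPerSample wrapped around for 24-bit input and produced an oversized shift count, while the signed form yields a negative value that selects a right shift instead. Condensed into a single helper (the patch keeps the per-channel loops), the behaviour looks like this:

    #include <cstdint>

    // Examples: bitsPerSample = 8  -> leftShift = +8, 0x7F     << 8 == 0x7F00
    //           bitsPerSample = 24 -> leftShift = -8, 0x123456 >> 8 == 0x1234
    int16_t to16Bit(int32_t sample, unsigned bitsPerSample) {
        const int leftShift = 16 - (int)bitsPerSample;   // signed on purpose, may be negative
        return (int16_t)(leftShift >= 0 ? sample << leftShift : sample >> -leftShift);
    }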
@@ -514,8 +523,10 @@
         case 8:
         case 16:
         case 24:
+        case 32: // generally not expected for FLAC
             break;
         default:
+            // Note: internally the FLAC extractor supports 2-32 bits.
             ALOGE("unsupported bits per sample %u", getBitsPerSample());
             return NO_INIT;
         }
@@ -532,8 +543,11 @@
         case 48000:
         case 88200:
         case 96000:
+        case 176400:
+        case 192000:
             break;
         default:
+            // Note: internally we support arbitrary sample rates from 8kHz to 192kHz.
             ALOGE("unsupported sample rate %u", getSampleRate());
             return NO_INIT;
         }
@@ -562,22 +576,19 @@
     return OK;
 }
 
-void FLACParser::allocateBuffers()
+void FLACParser::allocateBuffers(MediaBufferGroupHelperV3 *group)
 {
     CHECK(mGroup == NULL);
-    mGroup = new MediaBufferGroup;
+    mGroup = group;
     mMaxBufferSize = getMaxBlockSize() * getChannels() * getOutputSampleSize();
-    mGroup->add_buffer(MediaBufferBase::Create(mMaxBufferSize));
+    mGroup->add_buffer(mMaxBufferSize);
 }
 
 void FLACParser::releaseBuffers()
 {
-    CHECK(mGroup != NULL);
-    delete mGroup;
-    mGroup = NULL;
 }
 
-MediaBufferBase *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
+MediaBufferHelperV3 *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
 {
     mWriteRequested = true;
     mWriteCompleted = false;
@@ -614,7 +625,7 @@
     }
     // acquire a media buffer
     CHECK(mGroup != NULL);
-    MediaBufferBase *buffer;
+    MediaBufferHelperV3 *buffer;
     status_t err = mGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return NULL;
@@ -641,8 +652,9 @@
     CHECK(mWriteHeader.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
     FLAC__uint64 sampleNumber = mWriteHeader.number.sample_number;
     int64_t timeUs = (1000000LL * sampleNumber) / getSampleRate();
-    buffer->meta_data().setInt64(kKeyTime, timeUs);
-    buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+    AMediaFormat *meta = buffer->meta_data();
+    AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
+    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
     return buffer;
 }
 
@@ -676,7 +688,7 @@
     ALOGV("FLACSource::start");
 
     CHECK(!mStarted);
-    mParser->allocateBuffers();
+    mParser->allocateBuffers(mBufferGroup);
     mStarted = true;
 
     return AMEDIA_OK;
@@ -704,9 +716,9 @@
 }
 
 media_status_t FLACSource::read(
-        MediaBufferBase **outBuffer, const ReadOptions *options)
+        MediaBufferHelperV3 **outBuffer, const ReadOptions *options)
 {
-    MediaBufferBase *buffer;
+    MediaBufferHelperV3 *buffer;
     // process an optional seek request
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
@@ -760,7 +772,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV2 *FLACExtractor::getTrack(size_t index)
+MediaTrackHelperV3 *FLACExtractor::getTrack(size_t index)
 {
     if (mInitCheck != OK || index > 0) {
         return NULL;
@@ -816,22 +828,22 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-            EXTRACTORDEF_VERSION_CURRENT,
+            EXTRACTORDEF_VERSION_CURRENT + 1,
             UUID("1364b048-cc45-4fda-9934-327d0ebf9829"),
             1,
             "FLAC Extractor",
             {
-                .v2 = [](
+                .v3 = [](
                         CDataSource *source,
                         float *confidence,
                         void **,
-                        FreeMetaFunc *) -> CreatorFuncV2 {
+                        FreeMetaFunc *) -> CreatorFuncV3 {
                     DataSourceHelper helper(source);
                     if (SniffFLAC(&helper, confidence)) {
                         return [](
                                 CDataSource *source,
-                                void *) -> CMediaExtractorV2* {
-                            return wrapV2(new FLACExtractor(new DataSourceHelper(source)));};
+                                void *) -> CMediaExtractorV3* {
+                            return wrapV3(new FLACExtractor(new DataSourceHelper(source)));};
                     }
                     return NULL;
                 }
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/FLACExtractor.h
index 323307b..9604e4a 100644
--- a/media/extractors/flac/FLACExtractor.h
+++ b/media/extractors/flac/FLACExtractor.h
@@ -27,13 +27,13 @@
 
 class FLACParser;
 
-class FLACExtractor : public MediaExtractorPluginHelperV2 {
+class FLACExtractor : public MediaExtractorPluginHelperV3 {
 
 public:
     explicit FLACExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV2 *getTrack(size_t index);
+    virtual MediaTrackHelperV3 *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index c5df5c7..7d42e70 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -8,7 +8,6 @@
 
     shared_libs: [
         "liblog",
-        "libmediaextractor",
         "libmediandk",
     ],
 
@@ -35,9 +34,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/midi/MidiExtractor.cpp b/media/extractors/midi/MidiExtractor.cpp
index cd39cec..43f394c 100644
--- a/media/extractors/midi/MidiExtractor.cpp
+++ b/media/extractors/midi/MidiExtractor.cpp
@@ -25,15 +25,14 @@
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
 #include <libsonivox/eas_reverb.h>
 
 namespace android {
 
-// how many Sonivox output buffers to aggregate into one MediaBufferBase
+// how many Sonivox output buffers to aggregate into one MediaBuffer
 static const int NUM_COMBINE_BUFFERS = 4;
 
-class MidiSource : public MediaTrackHelperV2 {
+class MidiSource : public MediaTrackHelperV3 {
 
 public:
     MidiSource(
@@ -45,7 +44,7 @@
     virtual media_status_t getFormat(AMediaFormat *);
 
     virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~MidiSource();
@@ -93,7 +92,7 @@
 
     CHECK(!mStarted);
     mStarted = true;
-    mEngine.allocateBuffers();
+    mEngine.allocateBuffers(mBufferGroup);
     return AMEDIA_OK;
 }
 
@@ -114,10 +113,10 @@
 }
 
 media_status_t MidiSource::read(
-        MediaBufferBase **outBuffer, const ReadOptions *options)
+        MediaBufferHelperV3 **outBuffer, const ReadOptions *options)
 {
     ALOGV("MidiSource::read");
-    MediaBufferBase *buffer;
+    MediaBufferHelperV3 *buffer;
     // process an optional seek request
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
@@ -144,7 +143,6 @@
 MidiEngine::MidiEngine(CDataSource *dataSource,
         AMediaFormat *fileMetadata,
         AMediaFormat *trackMetadata) :
-            mGroup(NULL),
             mEasData(NULL),
             mEasHandle(NULL),
             mEasConfig(NULL),
@@ -194,7 +192,6 @@
     if (mEasData) {
         EAS_Shutdown(mEasData);
     }
-    delete mGroup;
     delete mIoWrapper;
 }
 
@@ -202,22 +199,20 @@
     return mIsInitialized ? OK : UNKNOWN_ERROR;
 }
 
-status_t MidiEngine::allocateBuffers() {
+status_t MidiEngine::allocateBuffers(MediaBufferGroupHelperV3 *group) {
     // select reverb preset and enable
     EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_PRESET, EAS_PARAM_REVERB_CHAMBER);
     EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_BYPASS, EAS_FALSE);
 
-    mGroup = new MediaBufferGroup;
     int bufsize = sizeof(EAS_PCM)
             * mEasConfig->mixBufferSize * mEasConfig->numChannels * NUM_COMBINE_BUFFERS;
     ALOGV("using %d byte buffer", bufsize);
-    mGroup->add_buffer(MediaBufferBase::Create(bufsize));
+    mGroup = group;
+    mGroup->add_buffer(bufsize);
     return OK;
 }
 
 status_t MidiEngine::releaseBuffers() {
-    delete mGroup;
-    mGroup = NULL;
     return OK;
 }
 
@@ -227,13 +222,13 @@
     return result == EAS_SUCCESS ? OK : UNKNOWN_ERROR;
 }
 
-MediaBufferBase* MidiEngine::readBuffer() {
+MediaBufferHelperV3* MidiEngine::readBuffer() {
     EAS_STATE state;
     EAS_State(mEasData, mEasHandle, &state);
     if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
         return NULL;
     }
-    MediaBufferBase *buffer;
+    MediaBufferHelperV3 *buffer;
     status_t err = mGroup->acquire_buffer(&buffer);
     if (err != OK) {
         ALOGE("readBuffer: no buffer");
@@ -242,7 +237,9 @@
     EAS_I32 timeMs;
     EAS_GetLocation(mEasData, mEasHandle, &timeMs);
     int64_t timeUs = 1000ll * timeMs;
-    buffer->meta_data().setInt64(kKeyTime, timeUs);
+    AMediaFormat *meta = buffer->meta_data();
+    AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeUs);
+    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
 
     EAS_PCM* p = (EAS_PCM*) buffer->data();
     int numBytesOutput = 0;
@@ -289,7 +286,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV2 *MidiExtractor::getTrack(size_t index)
+MediaTrackHelperV3 *MidiExtractor::getTrack(size_t index)
 {
     if (mInitCheck != OK || index > 0) {
         return NULL;
@@ -334,21 +331,21 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT,
+        EXTRACTORDEF_VERSION_CURRENT + 1,
         UUID("ef6cca0a-f8a2-43e6-ba5f-dfcd7c9a7ef2"),
         1,
         "MIDI Extractor",
         {
-            .v2 = [](
+            .v3 = [](
                 CDataSource *source,
                 float *confidence,
                 void **,
-                FreeMetaFunc *) -> CreatorFuncV2 {
+                FreeMetaFunc *) -> CreatorFuncV3 {
                 if (SniffMidi(source, confidence)) {
                     return [](
                             CDataSource *source,
-                            void *) -> CMediaExtractorV2* {
-                        return wrapV2(new MidiExtractor(source));};
+                            void *) -> CMediaExtractorV3* {
+                        return wrapV3(new MidiExtractor(source));};
                 }
                 return NULL;
             }
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/MidiExtractor.h
index 412d6eb..ad345b8 100644
--- a/media/extractors/midi/MidiExtractor.h
+++ b/media/extractors/midi/MidiExtractor.h
@@ -38,26 +38,26 @@
 
     status_t initCheck();
 
-    status_t allocateBuffers();
+    status_t allocateBuffers(MediaBufferGroupHelperV3 *group);
     status_t releaseBuffers();
     status_t seekTo(int64_t positionUs);
-    MediaBufferBase* readBuffer();
+    MediaBufferHelperV3* readBuffer();
 private:
     MidiIoWrapper *mIoWrapper;
-    MediaBufferGroup *mGroup;
+    MediaBufferGroupHelperV3 *mGroup;
     EAS_DATA_HANDLE mEasData;
     EAS_HANDLE mEasHandle;
     const S_EAS_LIB_CONFIG* mEasConfig;
     bool mIsInitialized;
 };
 
-class MidiExtractor : public MediaExtractorPluginHelperV2 {
+class MidiExtractor : public MediaExtractorPluginHelperV3 {
 
 public:
     explicit MidiExtractor(CDataSource *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV2 *getTrack(size_t index);
+    virtual MediaTrackHelperV3 *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index ae108ec..8d028e1 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -41,9 +41,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index 1025c46..4e2f248 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -12,14 +12,13 @@
 
     shared_libs: [
         "liblog",
-        "libmediaextractor",
         "libmediandk",
-        "libstagefright_foundation",
     ],
 
     static_libs: [
         "libutils",
         "libstagefright_id3",
+        "libstagefright_foundation",
     ],
 
     name: "libmp3extractor",
@@ -40,9 +39,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 0e1ffb4..7abec54 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -207,7 +207,7 @@
     return valid;
 }
 
-class MP3Source : public MediaTrackHelperV2 {
+class MP3Source : public MediaTrackHelperV3 {
 public:
     MP3Source(
             AMediaFormat *meta, DataSourceHelper *source,
@@ -220,7 +220,7 @@
     virtual media_status_t getFormat(AMediaFormat *meta);
 
     virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~MP3Source();
@@ -235,7 +235,6 @@
     int64_t mCurrentTimeUs;
     bool mStarted;
     MP3Seeker *mSeeker;
-    MediaBufferGroup *mGroup;
 
     int64_t mBasisTimeUs;
     int64_t mSamplesRead;
@@ -414,7 +413,7 @@
     return mInitCheck != OK ? 0 : 1;
 }
 
-MediaTrackHelperV2 *MP3Extractor::getTrack(size_t index) {
+MediaTrackHelperV3 *MP3Extractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -455,7 +454,6 @@
       mCurrentTimeUs(0),
       mStarted(false),
       mSeeker(seeker),
-      mGroup(NULL),
       mBasisTimeUs(0),
       mSamplesRead(0) {
 }
@@ -469,9 +467,7 @@
 media_status_t MP3Source::start() {
     CHECK(!mStarted);
 
-    mGroup = new MediaBufferGroup;
-
-    mGroup->add_buffer(MediaBufferBase::Create(kMaxFrameSize));
+    mBufferGroup->add_buffer(kMaxFrameSize);
 
     mCurrentPos = mFirstFramePos;
     mCurrentTimeUs = 0;
@@ -487,9 +483,6 @@
 media_status_t MP3Source::stop() {
     CHECK(mStarted);
 
-    delete mGroup;
-    mGroup = NULL;
-
     mStarted = false;
 
     return AMEDIA_OK;
@@ -500,7 +493,7 @@
 }
 
 media_status_t MP3Source::read(
-        MediaBufferBase **out, const ReadOptions *options) {
+        MediaBufferHelperV3 **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -530,8 +523,8 @@
         mSamplesRead = 0;
     }
 
-    MediaBufferBase *buffer;
-    status_t err = mGroup->acquire_buffer(&buffer);
+    MediaBufferHelperV3 *buffer;
+    status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return AMEDIA_ERROR_UNKNOWN;
     }
@@ -597,8 +590,9 @@
 
     buffer->set_range(0, frame_size);
 
-    buffer->meta_data().setInt64(kKeyTime, mCurrentTimeUs);
-    buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+    AMediaFormat *meta = buffer->meta_data();
+    AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, mCurrentTimeUs);
+    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
 
     mCurrentPos += frame_size;
 
@@ -674,14 +668,14 @@
     return AMEDIA_OK;
 }
 
-static CMediaExtractorV2* CreateExtractor(
+static CMediaExtractorV3* CreateExtractor(
         CDataSource *source,
         void *meta) {
     Mp3Meta *metaData = static_cast<Mp3Meta *>(meta);
-    return wrapV2(new MP3Extractor(new DataSourceHelper(source), metaData));
+    return wrapV3(new MP3Extractor(new DataSourceHelper(source), metaData));
 }
 
-static CreatorFuncV2 Sniff(
+static CreatorFuncV3 Sniff(
         CDataSource *source, float *confidence, void **meta,
         FreeMetaFunc *freeMeta) {
     off64_t pos = 0;
@@ -718,11 +712,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT,
+        EXTRACTORDEF_VERSION_CURRENT + 1,
         UUID("812a3f6c-c8cf-46de-b529-3774b14103d4"),
         1, // version
         "MP3 Extractor",
-        { .v2 = Sniff }
+        { .v3 = Sniff }
     };
 }
 
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/MP3Extractor.h
index 22547e2..fe72cff 100644
--- a/media/extractors/mp3/MP3Extractor.h
+++ b/media/extractors/mp3/MP3Extractor.h
@@ -32,13 +32,13 @@
 class String8;
 struct Mp3Meta;
 
-class MP3Extractor : public MediaExtractorPluginHelperV2 {
+class MP3Extractor : public MediaExtractorPluginHelperV3 {
 public:
     MP3Extractor(DataSourceHelper *source, Mp3Meta *meta);
     ~MP3Extractor();
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV2 *getTrack(size_t index);
+    virtual MediaTrackHelperV3 *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
index a95c2db..59a2e9b 100644
--- a/media/extractors/mp4/AC4Parser.cpp
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -310,13 +310,13 @@
                 pres_bytes += mBitReader.getBits(16);
             }
             ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
-            if (presentation_version > 1) {
+            if (presentation_version > 2) {
                 CHECK_BITS_LEFT(pres_bytes * 8);
                 mBitReader.skipBits(pres_bytes * 8);
                 continue;
             }
-            // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
-            // start with a presentation_config of 5 bits
+            // ac4_presentation_v0_dsi(), ac4_presentation_v1_dsi() and ac4_presentation_v2_dsi()
+            // all start with a presentation_config of 5 bits
             CHECK_BITS_LEFT(5);
             presentation_config = mBitReader.getBits(5);
             b_single_substream_group = (presentation_config == 0x1f);
@@ -363,7 +363,7 @@
             uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
             ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
                 dsi_frame_rate_multiply_info);
-            if (ac4_dsi_version == 1 && presentation_version == 1) {
+            if (ac4_dsi_version == 1 && (presentation_version == 1 || presentation_version == 2)) {
                 CHECK_BITS_LEFT(2);
                 uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
                 ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
@@ -386,7 +386,7 @@
                 ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
                     BOOLSTR(b_presentation_channel_coded));
                 if (b_presentation_channel_coded) {
-                    if (presentation_version == 1) {
+                    if (presentation_version == 1 || presentation_version == 2) {
                         CHECK_BITS_LEFT(5);
                         uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
                         mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
@@ -411,7 +411,7 @@
                     ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
                         presentation_channel_mask_v1);
                 }
-                if (presentation_version == 1) {
+                if (presentation_version == 1 || presentation_version == 2) {
                     CHECK_BITS_LEFT(1);
                     bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
                     ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 47dc718..91de353 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -48,9 +48,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 337ff2a..52213bd 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2733,8 +2733,7 @@
 
     // + 4-byte type
     offset += 4;
-    // at least for AC4 DSI v1 this is big enough
-    const uint32_t kAC4SpecificBoxPayloadSize = 256;
+    const uint32_t kAC4SpecificBoxPayloadSize = 1176;
     uint8_t chunk[kAC4SpecificBoxPayloadSize];
     ssize_t dsiSize = size - 8; // size of box - size and type fields
     if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize ||
@@ -5353,7 +5352,7 @@
         }
     }
 
-    if ((!mIsAVC && !mIsHEVC && !mIsAC4)) {
+    if (!mIsAVC && !mIsHEVC && !mIsAC4) {
         if (newBuffer) {
             if (mIsPcm) {
                 // The twos' PCM block reader assumes that all samples have the same size.
@@ -5430,58 +5429,11 @@
             }
         }
 
-        if (!mIsAVC && !mIsHEVC && !mIsAC4) {
-            *out = mBuffer;
-            mBuffer = NULL;
-
-            return AMEDIA_OK;
-        }
-
-        if (mIsAC4) {
-            mBuffer->release();
-            mBuffer = NULL;
-
-            return AMEDIA_ERROR_IO;
-        }
-
-        // Each NAL unit is split up into its constituent fragments and
-        // each one of them returned in its own buffer.
-
-        CHECK(mBuffer->range_length() >= mNALLengthSize);
-
-        const uint8_t *src =
-            (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
-
-        size_t nal_size = parseNALSize(src);
-        if (mNALLengthSize > SIZE_MAX - nal_size) {
-            ALOGE("b/24441553, b/24445122");
-        }
-        if (mBuffer->range_length() - mNALLengthSize < nal_size) {
-            ALOGE("incomplete NAL unit.");
-
-            mBuffer->release();
-            mBuffer = NULL;
-
-            return AMEDIA_ERROR_MALFORMED;
-        }
-
-        MediaBufferBase *clone = mBuffer->clone();
-        CHECK(clone != NULL);
-        clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
-
-        CHECK(mBuffer != NULL);
-        mBuffer->set_range(
-                mBuffer->range_offset() + mNALLengthSize + nal_size,
-                mBuffer->range_length() - mNALLengthSize - nal_size);
-
-        if (mBuffer->range_length() == 0) {
-            mBuffer->release();
-            mBuffer = NULL;
-        }
-
-        *out = clone;
+        *out = mBuffer;
+        mBuffer = NULL;
 
         return AMEDIA_OK;
+
     } else if (mIsAC4) {
         CHECK(mBuffer != NULL);
         // Make sure there is enough space to write the sync header and the raw frame
@@ -5774,7 +5726,7 @@
 
     }
 
-    if ((!mIsAVC && !mIsHEVC)) {
+    if (!mIsAVC && !mIsHEVC) {
         if (newBuffer) {
             if (!isInRange((size_t)0u, mBuffer->size(), size)) {
                 mBuffer->release();
@@ -5826,52 +5778,11 @@
             ++mCurrentSampleIndex;
         }
 
-        if (!mIsAVC && !mIsHEVC) {
-            *out = mBuffer;
-            mBuffer = NULL;
-
-            return AMEDIA_OK;
-        }
-
-        // Each NAL unit is split up into its constituent fragments and
-        // each one of them returned in its own buffer.
-
-        CHECK(mBuffer->range_length() >= mNALLengthSize);
-
-        const uint8_t *src =
-            (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
-
-        size_t nal_size = parseNALSize(src);
-        if (mNALLengthSize > SIZE_MAX - nal_size) {
-            ALOGE("b/24441553, b/24445122");
-        }
-
-        if (mBuffer->range_length() - mNALLengthSize < nal_size) {
-            ALOGE("incomplete NAL unit.");
-
-            mBuffer->release();
-            mBuffer = NULL;
-
-            return AMEDIA_ERROR_MALFORMED;
-        }
-
-        MediaBufferBase *clone = mBuffer->clone();
-        CHECK(clone != NULL);
-        clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
-
-        CHECK(mBuffer != NULL);
-        mBuffer->set_range(
-                mBuffer->range_offset() + mNALLengthSize + nal_size,
-                mBuffer->range_length() - mNALLengthSize - nal_size);
-
-        if (mBuffer->range_length() == 0) {
-            mBuffer->release();
-            mBuffer = NULL;
-        }
-
-        *out = clone;
+        *out = mBuffer;
+        mBuffer = NULL;
 
         return AMEDIA_OK;
+
     } else {
         ALOGV("whole NAL");
         // Whole NAL units are returned but each fragment is prefixed by
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 40314dc..38c86eb 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -52,9 +52,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index c6deb18..b28877d 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -7,6 +7,10 @@
         "external/tremolo",
     ],
 
+    header_libs: [
+        "libaudio_system_headers",
+    ],
+
     shared_libs: [
         "liblog",
         "libmediaextractor",
@@ -38,9 +42,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index a52ccb1..cc2c792 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -34,6 +34,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <media/stagefright/MetaDataUtils.h>
+#include <system/audio.h>
 #include <utils/String8.h>
 
 extern "C" {
@@ -133,6 +134,8 @@
 
     Vector<TOCEntry> mTableOfContents;
 
+    int32_t mHapticChannelCount;
+
     ssize_t readPage(off64_t offset, Page *page);
     status_t findNextPage(off64_t startOffset, off64_t *pageOffset);
 
@@ -163,6 +166,8 @@
 
     void buildTableOfContents();
 
+    void setChannelMask(int channelCount);
+
     MyOggExtractor(const MyOggExtractor &);
     MyOggExtractor &operator=(const MyOggExtractor &);
 };
@@ -310,7 +315,8 @@
       mMimeType(mimeType),
       mNumHeaders(numHeaders),
       mSeekPreRollUs(seekPreRollUs),
-      mFirstDataOffset(-1) {
+      mFirstDataOffset(-1),
+      mHapticChannelCount(0) {
     mCurrentPage.mNumSegments = 0;
 
     vorbis_info_init(&mVi);
@@ -1083,6 +1089,7 @@
     }
 
     parseFileMetaData();
+    setChannelMask(mChannelCount);
     return AMEDIA_OK;
 }
 
@@ -1157,6 +1164,7 @@
             }
 
             parseFileMetaData();
+            setChannelMask(mVi.channels);
             break;
         }
 
@@ -1192,6 +1200,29 @@
         parseVorbisComment(mFileMeta, comment, commentLength);
         //ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
     }
+
+    AMediaFormat_getInt32(mFileMeta, "haptic", &mHapticChannelCount);
+}
+
+void MyOggExtractor::setChannelMask(int channelCount) {
+    // Set the channel mask according to the channel count. When a haptic channel count is
+    // found in the file metadata, include the haptic channel mask so haptic playback can be tried.
+    if (mHapticChannelCount > 0) {
+        const audio_channel_mask_t hapticChannelMask =
+                haptic_channel_mask_from_count(mHapticChannelCount);
+        const int32_t audioChannelCount = channelCount - mHapticChannelCount;
+        if (hapticChannelMask == AUDIO_CHANNEL_INVALID
+                || audioChannelCount <= 0 || audioChannelCount > FCC_8) {
+            ALOGE("Invalid haptic channel count found in metadata: %d", mHapticChannelCount);
+        } else {
+            const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
+                    audioChannelCount) | hapticChannelMask;
+            AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
+        }
+    } else {
+        AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK,
+                audio_channel_out_mask_from_count(channelCount));
+    }
 }
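
The mask math in setChannelMask() above is easy to misread, so here is a condensed restatement of the same derivation as a free function; it reuses only the helpers already referenced in the hunk (haptic_channel_mask_from_count(), audio_channel_out_mask_from_count(), AUDIO_CHANNEL_INVALID, FCC_8), while the function name and the error-path return value are illustrative rather than part of the patch:

    #include <system/audio.h>

    // Sketch only: same logic as MyOggExtractor::setChannelMask() above.
    static audio_channel_mask_t combinedChannelMask(int totalChannels, int hapticChannels) {
        if (hapticChannels <= 0) {
            // No haptic metadata: a plain positional output mask covers every channel.
            return audio_channel_out_mask_from_count(totalChannels);
        }
        const audio_channel_mask_t hapticMask = haptic_channel_mask_from_count(hapticChannels);
        const int audioChannels = totalChannels - hapticChannels;
        if (hapticMask == AUDIO_CHANNEL_INVALID || audioChannels <= 0 || audioChannels > FCC_8) {
            // Mirrors the ALOGE branch above, where the extractor leaves the mask untouched.
            return AUDIO_CHANNEL_INVALID;
        }
        // Positional mask for the audio channels OR'd with the haptic mask for the rest.
        return audio_channel_out_mask_from_count(audioChannels) | hapticMask;
    }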
 
 
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index 185bb32..7e89271 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -9,7 +9,6 @@
     shared_libs: [
         "libbinder_ndk",
         "liblog",
-        "libmediaextractor",
         "libmediandk",
     ],
 
@@ -37,9 +36,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index 86500ef..1f0aae5 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -22,9 +22,7 @@
 
 #include <android/binder_ibinder.h> // for AIBinder_getCallingUid
 #include <audio_utils/primitives.h>
-#include <media/DataSourceBase.h>
 #include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -68,7 +66,7 @@
     return ptr[1] << 8 | ptr[0];
 }
 
-struct WAVSource : public MediaTrackHelperV2 {
+struct WAVSource : public MediaTrackHelperV3 {
     WAVSource(
             DataSourceHelper *dataSource,
             AMediaFormat *meta,
@@ -81,7 +79,7 @@
     virtual media_status_t getFormat(AMediaFormat *meta);
 
     virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
 
     virtual bool supportNonblockingRead() { return true; }
 
@@ -101,7 +99,6 @@
     off64_t mOffset;
     size_t mSize;
     bool mStarted;
-    MediaBufferGroup *mGroup;
     off64_t mCurrentPos;
 
     WAVSource(const WAVSource &);
@@ -134,7 +131,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV2 *WAVExtractor::getTrack(size_t index) {
+MediaTrackHelperV3 *WAVExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index > 0) {
         return NULL;
     }
@@ -379,8 +376,7 @@
       mOutputFloat(outputFloat),
       mOffset(offset),
       mSize(size),
-      mStarted(false),
-      mGroup(NULL) {
+      mStarted(false) {
     CHECK(AMediaFormat_getInt32(mMeta, AMEDIAFORMAT_KEY_SAMPLE_RATE, &mSampleRate));
     CHECK(AMediaFormat_getInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &mNumChannels));
     CHECK(AMediaFormat_getInt32(mMeta, AMEDIAFORMAT_KEY_BITS_PER_SAMPLE, &mBitsPerSample));
@@ -398,7 +394,9 @@
     CHECK(!mStarted);
 
     // some WAV files may have large audio buffers that use shared memory transfer.
-    mGroup = new MediaBufferGroup(4 /* buffers */, kMaxFrameSize);
+    if (!mBufferGroup->init(4 /* buffers */, kMaxFrameSize)) {
+        return AMEDIA_ERROR_UNKNOWN;
+    }
 
     mCurrentPos = mOffset;
 
@@ -412,9 +410,6 @@
 
     CHECK(mStarted);
 
-    delete mGroup;
-    mGroup = NULL;
-
     mStarted = false;
 
     return AMEDIA_OK;
@@ -433,10 +428,10 @@
 }
 
 media_status_t WAVSource::read(
-        MediaBufferBase **out, const ReadOptions *options) {
+        MediaBufferHelperV3 **out, const ReadOptions *options) {
     *out = NULL;
 
-    if (options != nullptr && options->getNonBlocking() && !mGroup->has_buffers()) {
+    if (options != nullptr && options->getNonBlocking() && !mBufferGroup->has_buffers()) {
         return AMEDIA_ERROR_WOULD_BLOCK;
     }
 
@@ -459,10 +454,10 @@
         mCurrentPos = pos + mOffset;
     }
 
-    MediaBufferBase *buffer;
-    status_t err = mGroup->acquire_buffer(&buffer);
+    MediaBufferHelperV3 *buffer;
+    media_status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
-        return AMEDIA_ERROR_UNKNOWN;
+        return err;
     }
 
     // maxBytesToRead may be reduced so that in-place data conversion will fit in buffer size.
@@ -573,9 +568,10 @@
                 / (mNumChannels * bytesPerSample) / mSampleRate;
     }
 
-    buffer->meta_data().setInt64(kKeyTime, timeStampUs);
+    AMediaFormat *meta = buffer->meta_data();
+    AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_TIME_US, timeStampUs);
+    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
 
-    buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
     mCurrentPos += n;
 
     *out = buffer;
@@ -585,13 +581,13 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-static CMediaExtractorV2* CreateExtractor(
+static CMediaExtractorV3* CreateExtractor(
         CDataSource *source,
         void *) {
-    return wrapV2(new WAVExtractor(new DataSourceHelper(source)));
+    return wrapV3(new WAVExtractor(new DataSourceHelper(source)));
 }
 
-static CreatorFuncV2 Sniff(
+static CreatorFuncV3 Sniff(
         CDataSource *source,
         float *confidence,
         void **,
@@ -625,11 +621,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT,
+        EXTRACTORDEF_VERSION_CURRENT + 1,
         UUID("7d613858-5837-4a38-84c5-332d1cddee27"),
         1, // version
         "WAV Extractor",
-        { .v2 = Sniff }
+        { .v3 = Sniff }
     };
 }
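
Two details of the WAV conversion above are worth calling out: the track advertises supportNonblockingRead() and checks mBufferGroup->has_buffers() up front, so non-blocking callers get AMEDIA_ERROR_WOULD_BLOCK instead of stalling, and acquire_buffer() errors now propagate as media_status_t rather than being collapsed to AMEDIA_ERROR_UNKNOWN. A compressed sketch of that entry path (MyWavLikeSource is a placeholder and the elided middle stands in for the existing conversion code):

    // Sketch only: the non-blocking contract as implemented in WAVSource::read() above.
    media_status_t MyWavLikeSource::read(MediaBufferHelperV3 **out, const ReadOptions *options) {
        *out = NULL;
        // Honour supportNonblockingRead(): bail out instead of sleeping in acquire_buffer()
        // when the caller asked not to block and the group has no free buffer.
        if (options != nullptr && options->getNonBlocking() && !mBufferGroup->has_buffers()) {
            return AMEDIA_ERROR_WOULD_BLOCK;
        }
        MediaBufferHelperV3 *buffer;
        media_status_t err = mBufferGroup->acquire_buffer(&buffer);
        if (err != OK) {
            return err;  // already a media_status_t, so it can be returned as-is
        }
        // ... read/convert samples into the buffer and stamp AMEDIAFORMAT_KEY_TIME_US ...
        *out = buffer;
        return AMEDIA_OK;
    }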
 
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/WAVExtractor.h
index ce34881..9b7dfde 100644
--- a/media/extractors/wav/WAVExtractor.h
+++ b/media/extractors/wav/WAVExtractor.h
@@ -29,12 +29,12 @@
 struct CDataSource;
 class String8;
 
-class WAVExtractor : public MediaExtractorPluginHelperV2 {
+class WAVExtractor : public MediaExtractorPluginHelperV3 {
 public:
     explicit WAVExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV2 *getTrack(size_t index);
+    virtual MediaTrackHelperV3 *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index 620edc7..9b32543 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -72,7 +72,6 @@
             ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
             return AAudioConvert_androidToAAudioResult(err);
         } else if (stream < 0) {
-            ALOGE("BpAAudioService::client OPEN_STREAM passed stream %d", stream);
             return stream;
         }
         err = configurationOutput.readFromParcel(&reply);
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 0a8021a..fffcda0 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -131,7 +131,6 @@
         mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
     }
     if (mServiceStreamHandle < 0) {
-        ALOGE("%s - openStream() returned %d", __func__, mServiceStreamHandle);
         return mServiceStreamHandle;
     }
 
@@ -693,7 +692,6 @@
     }
 
     aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(adjustedFrames, &actualFrames);
-    ALOGD("setBufferSize() req = %d => %d", requestedFrames, actualFrames);
     if (result < 0) {
         return result;
     } else {
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index e272f2a..2fb3986 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -241,7 +241,7 @@
 {
     AudioStream *audioStream = nullptr;
     // Please leave these logs because they are very helpful when debugging.
-    ALOGD("AAudioStreamBuilder_openStream() called ----------------------------------------");
+    ALOGD("%s() called ----------------------------------------", __func__);
     AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
     aaudio_result_t result = streamBuilder->build(&audioStream);
     ALOGD("AAudioStreamBuilder_openStream() returns %d = %s for (%p) ----------------",
@@ -269,7 +269,7 @@
 {
     aaudio_result_t result = AAUDIO_ERROR_NULL;
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_close(%p) called ---------------", stream);
+    ALOGD("%s(%p) called ---------------", __func__, stream);
     if (audioStream != nullptr) {
         result = audioStream->safeClose();
         // Close will only fail if called illegally, for example, from a callback.
@@ -284,7 +284,7 @@
     // We're potentially freeing `stream` above, so its use here makes some
     // static analysis tools unhappy. Casting to uintptr_t helps assure
     // said tools that we're not doing anything bad here.
-    ALOGD("AAudioStream_close(%#" PRIxPTR ") returned %d ---------",
+    ALOGD("%s(%#" PRIxPTR ") returned %d ---------", __func__,
           reinterpret_cast<uintptr_t>(stream), result);
     return result;
 }
@@ -292,30 +292,30 @@
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestStart(%p) called --------------", stream);
+    ALOGD("%s(%p) called --------------", __func__, stream);
     aaudio_result_t result = audioStream->systemStart();
-    ALOGD("AAudioStream_requestStart(%p) returned %d ---------", stream, result);
+    ALOGD("%s(%p) returned %d ---------", __func__, stream, result);
     return result;
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestPause(%p)", stream);
+    ALOGD("%s(%p) called", __func__, stream);
     return audioStream->systemPause();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestFlush(%p)", stream);
+    ALOGD("%s(%p) called", __func__, stream);
     return audioStream->safeFlush();
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestStop(%p)", stream);
+    ALOGD("%s(%p) called", __func__, stream);
     return audioStream->systemStop();
 }
 
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 358021b..391af29 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -37,7 +37,6 @@
 }
 
 AudioStream::~AudioStream() {
-    ALOGD("destroying %p, state = %s", this, AAudio_convertStreamStateToText(getState()));
     // If the stream is deleted when OPEN or in use then audio resources will leak.
     // This would indicate an internal error. So we want to find this ASAP.
     LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 9885cb0..66e247f 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -38,7 +38,7 @@
 
 fifo_frames_t FifoControllerBase::getReadIndex() {
     // % works with non-power of two sizes
-    return (fifo_frames_t) (getReadCounter() % mCapacity);
+    return (fifo_frames_t) ((uint64_t)getReadCounter() % mCapacity);
 }
 
 void FifoControllerBase::advanceReadIndex(fifo_frames_t numFrames) {
@@ -51,7 +51,7 @@
 
 fifo_frames_t FifoControllerBase::getWriteIndex() {
     // % works with non-power of two sizes
-    return (fifo_frames_t) (getWriteCounter() % mCapacity);
+    return (fifo_frames_t) ((uint64_t)getWriteCounter() % mCapacity);
 }
 
 void FifoControllerBase::advanceWriteIndex(fifo_frames_t numFrames) {
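
The (uint64_t) casts above are what keep the index math safe now that the shared counters may be negative (see the fifo_wrap_around_zero test added below): C++ % truncates toward zero, so a signed modulo of a negative counter yields a negative, out-of-range index, whereas the unsigned modulo always lands in [0, capacity). A standalone illustration:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int     capacity = 7;     // matches the new fifo_wrap_around_zero test
        const int64_t counter  = -10;   // a counter that has gone negative

        const int signedIndex   = (int)(counter % capacity);            // -3, out of range
        const int unsignedIndex = (int)((uint64_t)counter % capacity);  // always in [0, capacity)

        printf("signed %% -> %d, unsigned %% -> %d\n", signedIndex, unsignedIndex);
        assert(unsignedIndex >= 0 && unsignedIndex < capacity);
        return 0;
    }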
diff --git a/media/libaaudio/tests/test_atomic_fifo.cpp b/media/libaaudio/tests/test_atomic_fifo.cpp
index a09b74c..130ef43 100644
--- a/media/libaaudio/tests/test_atomic_fifo.cpp
+++ b/media/libaaudio/tests/test_atomic_fifo.cpp
@@ -23,12 +23,12 @@
 #include "fifo/FifoController.h"
 
 using android::fifo_frames_t;
+using android::fifo_counter_t;
 using android::FifoController;
 using android::FifoBuffer;
 using android::WrappingBuffer;
 
-//void foo() {
-TEST(test_fifi_controller, fifo_indices) {
+TEST(test_fifo_controller, fifo_indices) {
     // Values are arbitrary primes designed to trigger edge cases.
     constexpr int capacity = 83;
     constexpr int threshold = 47;
@@ -73,18 +73,59 @@
     ASSERT_EQ(threshold - advance2, fifoController.getEmptyFramesAvailable());
 }
 
+TEST(test_fifo_controller, fifo_wrap_around_zero) {
+    constexpr int capacity = 7; // arbitrary prime
+    constexpr int threshold = capacity;
+    FifoController fifoController(capacity, threshold);
+    ASSERT_EQ(capacity, fifoController.getCapacity());
+    ASSERT_EQ(threshold, fifoController.getThreshold());
+
+    fifoController.setReadCounter(-10); // a bit less than negative capacity
+    for (int i = 0; i < 20; i++) {
+        EXPECT_EQ(i - 10, fifoController.getReadCounter());
+        EXPECT_GE(fifoController.getReadIndex(), 0);
+        EXPECT_LT(fifoController.getReadIndex(), capacity);
+        fifoController.advanceReadIndex(1);
+    }
+
+    fifoController.setWriteCounter(-10);
+    for (int i = 0; i < 20; i++) {
+        EXPECT_EQ(i - 10, fifoController.getWriteCounter());
+        EXPECT_GE(fifoController.getWriteIndex(), 0);
+        EXPECT_LT(fifoController.getWriteIndex(), capacity);
+        fifoController.advanceWriteIndex(1);
+    }
+}
+
+
 // TODO consider using a template for other data types.
+
+// Create a big array and then use a region in the middle for the unit tests.
+// Then we can scan the rest of the array to see if it got clobbered.
+static constexpr fifo_frames_t kBigArraySize = 1024;
+static constexpr fifo_frames_t kFifoDataOffset = 128; // starting index of FIFO data
+static constexpr int16_t       kSafeDataValue = 0x7654; // initial value of mVeryBigArray
+
 class TestFifoBuffer {
 public:
     explicit TestFifoBuffer(fifo_frames_t capacity, fifo_frames_t threshold = 0)
-        : mFifoBuffer(sizeof(int16_t), capacity) {
+        : mFifoBuffer(sizeof(int16_t), capacity,
+                      &mReadIndex,
+                      &mWriteIndex,
+                      &mVeryBigArray[kFifoDataOffset]) // address of start of FIFO data
+    {
+
+        // Assume a frame is one int16_t.
         // For reading and writing.
-        mData = new int16_t[capacity];
         if (threshold <= 0) {
             threshold = capacity;
         }
         mFifoBuffer.setThreshold(threshold);
         mThreshold = threshold;
+
+        for (fifo_frames_t i = 0; i < kBigArraySize; i++) {
+            mVeryBigArray[i] = kSafeDataValue;
+        }
     }
 
     void checkMisc() {
@@ -92,26 +133,70 @@
         ASSERT_EQ(mThreshold, mFifoBuffer.getThreshold());
     }
 
+    void verifyAddressInRange(void *p, void *valid, size_t numBytes) {
+        uintptr_t p_int = (uintptr_t) p;
+        uintptr_t valid_int = (uintptr_t) valid;
+        EXPECT_GE(p_int, valid_int);
+        EXPECT_LT(p_int, (valid_int + numBytes));
+    }
+
+    void verifyStorageIntegrity() {
+        for (fifo_frames_t i = 0; i < kFifoDataOffset; i++) {
+            EXPECT_EQ(mVeryBigArray[i], kSafeDataValue);
+        }
+        fifo_frames_t firstFrameAfter = kFifoDataOffset + mFifoBuffer.getBufferCapacityInFrames();
+        for (fifo_frames_t i = firstFrameAfter; i < kBigArraySize; i++) {
+            EXPECT_EQ(mVeryBigArray[i], kSafeDataValue);
+        }
+    }
+
     // Verify that the available frames in each part add up correctly.
-    void checkWrappingBuffer() {
+    void verifyWrappingBuffer() {
         WrappingBuffer wrappingBuffer;
+
+
+        // Does the sum of the two parts match the available value returned?
+        // For EmptyRoom
         fifo_frames_t framesAvailable =
                 mFifoBuffer.getEmptyFramesAvailable();
         fifo_frames_t wrapAvailable = mFifoBuffer.getEmptyRoomAvailable(&wrappingBuffer);
         EXPECT_EQ(framesAvailable, wrapAvailable);
         fifo_frames_t bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
         EXPECT_EQ(framesAvailable, bothAvailable);
-
+        // For FullData
         framesAvailable =
                 mFifoBuffer.getFullFramesAvailable();
         wrapAvailable = mFifoBuffer.getFullDataAvailable(&wrappingBuffer);
         EXPECT_EQ(framesAvailable, wrapAvailable);
         bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
         EXPECT_EQ(framesAvailable, bothAvailable);
+
+        // Are frame counts in legal range?
+        fifo_frames_t capacity = mFifoBuffer.getBufferCapacityInFrames();
+        EXPECT_GE(wrappingBuffer.numFrames[0], 0);
+        EXPECT_LE(wrappingBuffer.numFrames[0], capacity);
+        EXPECT_GE(wrappingBuffer.numFrames[1], 0);
+        EXPECT_LE(wrappingBuffer.numFrames[1], capacity);
+
+        // Are addresses within the FIFO data area?
+        size_t validBytes = capacity * sizeof(int16_t);
+        if (wrappingBuffer.numFrames[0]) {
+            verifyAddressInRange(wrappingBuffer.data[0], mFifoStorage, validBytes);
+            uint8_t *last = ((uint8_t *)wrappingBuffer.data[0])
+                            + mFifoBuffer.convertFramesToBytes(wrappingBuffer.numFrames[0]) - 1;
+            verifyAddressInRange(last, mFifoStorage, validBytes);
+        }
+        if (wrappingBuffer.numFrames[1]) {
+            verifyAddressInRange(wrappingBuffer.data[1], mFifoStorage, validBytes);
+            uint8_t *last = ((uint8_t *)wrappingBuffer.data[1])
+                            + mFifoBuffer.convertFramesToBytes(wrappingBuffer.numFrames[1]) - 1;
+            verifyAddressInRange(last, mFifoStorage, validBytes);
+        }
+
     }
 
     // Write data but do not overflow.
-    void writeData(fifo_frames_t numFrames) {
+    void writeMultipleDataFrames(fifo_frames_t numFrames) {
         fifo_frames_t framesAvailable =
                 mFifoBuffer.getEmptyFramesAvailable();
         fifo_frames_t framesToWrite = std::min(framesAvailable, numFrames);
@@ -122,8 +207,8 @@
         ASSERT_EQ(framesToWrite, actual);
     }
 
-    // Read data but do not underflow.
-    void verifyData(fifo_frames_t numFrames) {
+    // Read whatever data is available; do not underflow.
+    void verifyMultipleDataFrames(fifo_frames_t numFrames) {
         fifo_frames_t framesAvailable =
                 mFifoBuffer.getFullFramesAvailable();
         fifo_frames_t framesToRead = std::min(framesAvailable, numFrames);
@@ -134,20 +219,35 @@
         }
     }
 
+    // Read the specified number of frames.
+    void verifyRequestedData(fifo_frames_t numFrames) {
+        fifo_frames_t framesAvailable =
+                mFifoBuffer.getFullFramesAvailable();
+        ASSERT_LE(numFrames, framesAvailable);
+        fifo_frames_t framesToRead = std::min(framesAvailable, numFrames);
+        fifo_frames_t actual = mFifoBuffer.read(mData, framesToRead);
+        ASSERT_EQ(actual, numFrames);
+        for (int i = 0; i < actual; i++) {
+            ASSERT_EQ(mNextVerifyIndex++, mData[i]);
+        }
+    }
+
     // Wrap around the end of the buffer.
     void checkWrappingWriteRead() {
         constexpr int frames1 = 43;
         constexpr int frames2 = 15;
 
-        writeData(frames1);
-        checkWrappingBuffer();
-        verifyData(frames1);
-        checkWrappingBuffer();
+        writeMultipleDataFrames(frames1);
+        verifyWrappingBuffer();
+        verifyRequestedData(frames1);
+        verifyWrappingBuffer();
 
-        writeData(frames2);
-        checkWrappingBuffer();
-        verifyData(frames2);
-        checkWrappingBuffer();
+        writeMultipleDataFrames(frames2);
+        verifyWrappingBuffer();
+        verifyRequestedData(frames2);
+        verifyWrappingBuffer();
+
+        verifyStorageIntegrity();
     }
 
     // Write and Read a specific amount of data.
@@ -156,10 +256,12 @@
         // Wrap around with the smaller region in the second half.
         const int frames1 = capacity - 4;
         const int frames2 = 7; // arbitrary, small
-        writeData(frames1);
-        verifyData(frames1);
-        writeData(frames2);
-        verifyData(frames2);
+        writeMultipleDataFrames(frames1);
+        verifyRequestedData(frames1);
+        writeMultipleDataFrames(frames2);
+        verifyRequestedData(frames2);
+
+        verifyStorageIntegrity();
     }
 
     // Write and Read a specific amount of data.
@@ -168,10 +270,12 @@
         // Wrap around with the larger region in the second half.
         const int frames1 = capacity - 4;
         const int frames2 = capacity - 9; // arbitrary, large
-        writeData(frames1);
-        verifyData(frames1);
-        writeData(frames2);
-        verifyData(frames2);
+        writeMultipleDataFrames(frames1);
+        verifyRequestedData(frames1);
+        writeMultipleDataFrames(frames2);
+        verifyRequestedData(frames2);
+
+        verifyStorageIntegrity();
     }
 
     // Randomly read or write up to the maximum amount of data.
@@ -180,30 +284,67 @@
             fifo_frames_t framesEmpty =
                     mFifoBuffer.getEmptyFramesAvailable();
             fifo_frames_t numFrames = (fifo_frames_t)(drand48() * framesEmpty);
-            writeData(numFrames);
+            writeMultipleDataFrames(numFrames);
 
             fifo_frames_t framesFull =
                     mFifoBuffer.getFullFramesAvailable();
             numFrames = (fifo_frames_t)(drand48() * framesFull);
-            verifyData(numFrames);
+            verifyMultipleDataFrames(numFrames);
         }
+
+        verifyStorageIntegrity();
+    }
+
+    // Check that write and read still work when the counters start out negative.
+    void checkNegativeCounters() {
+        fifo_counter_t counter = -9876;
+        mFifoBuffer.setWriteCounter(counter);
+        mFifoBuffer.setReadCounter(counter);
+        checkWrappingWriteRead();
+    }
+
+    // Wrap over the boundary at 0x7FFFFFFFFFFFFFFF
+    // Note that the behavior of a signed overflow is technically undefined.
+    void checkHalfWrap() {
+        fifo_counter_t counter = INT64_MAX - 10;
+        mFifoBuffer.setWriteCounter(counter);
+        mFifoBuffer.setReadCounter(counter);
+        ASSERT_GT(mFifoBuffer.getWriteCounter(), 0);
+        checkWrappingWriteRead();
+        ASSERT_LT(mFifoBuffer.getWriteCounter(), 0); // did we wrap past INT64_MAX?
+    }
+
+    // Wrap over the boundary at 0xFFFFFFFFFFFFFFFF
+    void checkFullWrap() {
+        fifo_counter_t counter = -10;
+        mFifoBuffer.setWriteCounter(counter);
+        mFifoBuffer.setReadCounter(counter);
+        ASSERT_LT(mFifoBuffer.getWriteCounter(), 0);
+        writeMultipleDataFrames(20);
+        ASSERT_GT(mFifoBuffer.getWriteCounter(), 0); // did we wrap past zero?
+        verifyStorageIntegrity();
     }
 
     FifoBuffer     mFifoBuffer;
-    int16_t       *mData;
     fifo_frames_t  mNextWriteIndex = 0;
     fifo_frames_t  mNextVerifyIndex = 0;
     fifo_frames_t  mThreshold;
+
+    fifo_counter_t mReadIndex = 0;
+    fifo_counter_t mWriteIndex = 0;
+    int16_t        mVeryBigArray[kBigArraySize]; // Use the middle of this array for the FIFO.
+    int16_t       *mFifoStorage = &mVeryBigArray[kFifoDataOffset]; // Start here for storage.
+    int16_t        mData[kBigArraySize]{};
 };
 
-TEST(test_fifo_buffer, fifo_read_write) {
+TEST(test_fifo_buffer, fifo_write_read) {
     constexpr int capacity = 51; // arbitrary
     TestFifoBuffer tester(capacity);
     tester.checkMisc();
     tester.checkWriteRead();
 }
 
-TEST(test_fifo_buffer, fifo_wrapping_read_write) {
+TEST(test_fifo_buffer, fifo_wrapping_write_read) {
     constexpr int capacity = 59; // arbitrary, a little bigger this time
     TestFifoBuffer tester(capacity);
     tester.checkWrappingWriteRead();
@@ -227,3 +368,21 @@
     TestFifoBuffer tester(capacity, threshold);
     tester.checkRandomWriteRead();
 }
+
+TEST(test_fifo_buffer, fifo_negative_counters) {
+    constexpr int capacity = 49; // arbitrary
+    TestFifoBuffer tester(capacity);
+    tester.checkNegativeCounters();
+}
+
+TEST(test_fifo_buffer, fifo_half_wrap) {
+    constexpr int capacity = 57; // arbitrary
+    TestFifoBuffer tester(capacity);
+    tester.checkHalfWrap();
+}
+
+TEST(test_fifo_buffer, fifo_full_wrap) {
+    constexpr int capacity = 57; // arbitrary
+    TestFifoBuffer tester(capacity);
+    tester.checkFullWrap();
+}
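
The reworked fixture above places the FIFO storage in the middle of mVeryBigArray, pre-fills everything with kSafeDataValue, and has verifyStorageIntegrity() scan the regions on either side so any out-of-bounds write is caught. The same guard-band idea in a stripped-down, standalone form (sizes and names here are arbitrary, not taken from the test):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    constexpr int     kGuard   = 16;      // frames of guard band on each side
    constexpr int     kPayload = 32;      // frames handed to the code under test
    constexpr int16_t kSafe    = 0x7654;  // sentinel value, as in the test above

    int main() {
        int16_t big[kGuard + kPayload + kGuard];
        for (int16_t &v : big) v = kSafe;                  // pre-fill everything with the sentinel

        int16_t *payload = &big[kGuard];                   // only this slice is given out
        memset(payload, 0, kPayload * sizeof(int16_t));    // "code under test" writes here

        // Clobber check: both guard bands must still hold the sentinel.
        for (int i = 0; i < kGuard; i++) {
            assert(big[i] == kSafe);
            assert(big[kGuard + kPayload + i] == kSafe);
        }
        return 0;
    }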
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 038c854..3223647 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -211,7 +211,7 @@
         mBufferMemory.clear();
         IPCThreadState::self()->flushCommands();
         ALOGV("%s(%d): releasing session id %d",
-                __func__, mId, mSessionId);
+                __func__, mPortId, mSessionId);
         AudioSystem::releaseAudioSessionId(mSessionId, -1 /*pid*/);
     }
 }
@@ -239,7 +239,7 @@
     pid_t callingPid;
     pid_t myPid;
 
-    // Note mId is not valid until the track is created, so omit mId in ALOG for set.
+    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
           "uid %d, pid %d",
@@ -356,7 +356,7 @@
     // create the IAudioRecord
     status = createRecord_l(0 /*epoch*/, mOpPackageName);
 
-    ALOGV("%s(%d): status %d", __func__, mId, status);
+    ALOGV("%s(%d): status %d", __func__, mPortId, status);
 
     if (status != NO_ERROR) {
         if (mAudioRecordThread != 0) {
@@ -393,7 +393,7 @@
 
 status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
 {
-    ALOGV("%s(%d): sync event %d trigger session %d", __func__, mId, event, triggerSession);
+    ALOGV("%s(%d): sync event %d trigger session %d", __func__, mPortId, event, triggerSession);
 
     AutoMutex lock(mLock);
     if (mActive) {
@@ -434,7 +434,7 @@
 
     if (status != NO_ERROR) {
         mActive = false;
-        ALOGE("%s(%d): status %d", __func__, mId, status);
+        ALOGE("%s(%d): status %d", __func__, mPortId, status);
     } else {
         sp<AudioRecordThread> t = mAudioRecordThread;
         if (t != 0) {
@@ -458,7 +458,7 @@
 void AudioRecord::stop()
 {
     AutoMutex lock(mLock);
-    ALOGV("%s(%d): mActive:%d\n", __func__, mId, mActive);
+    ALOGV("%s(%d): mActive:%d\n", __func__, mPortId, mActive);
     if (!mActive) {
         return;
     }
@@ -638,7 +638,7 @@
 
     result.append(" AudioRecord::dump\n");
     result.appendFormat("  id(%d) status(%d), active(%d), session Id(%d)\n",
-                        mId, mStatus, mActive, mSessionId);
+                        mPortId, mStatus, mActive, mSessionId);
     result.appendFormat("  flags(%#x), req. flags(%#x), audio source(%d)\n",
                         mFlags, mOrigFlags, mAttributes.source);
     result.appendFormat("  format(%#x), channel mask(%#x), channel count(%u), sample rate(%u)\n",
@@ -680,7 +680,7 @@
     status_t status;
 
     if (audioFlinger == 0) {
-        ALOGE("%s(%d): Could not get audioflinger", __func__, mId);
+        ALOGE("%s(%d): Could not get audioflinger", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -708,7 +708,7 @@
             (mTransfer == TRANSFER_OBTAIN);
         if (!useCaseAllowed) {
             ALOGW("%s(%d): AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
-                  __func__, mId,
+                  __func__, mPortId,
                   convertTransferToText(mTransfer));
             mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
                     AUDIO_INPUT_FLAG_RAW));
@@ -744,7 +744,7 @@
 
     if (status != NO_ERROR) {
         ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
-              __func__, mId, status);
+              __func__, mPortId, status);
         goto exit;
     }
     ALOG_ASSERT(record != 0);
@@ -755,7 +755,7 @@
     mAwaitBoost = false;
     if (output.flags & AUDIO_INPUT_FLAG_FAST) {
         ALOGI("%s(%d): AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
-              __func__, mId,
+              __func__, mPortId,
               mReqFrameCount, output.frameCount);
         mAwaitBoost = true;
     }
@@ -765,13 +765,13 @@
     mSampleRate = output.sampleRate;
 
     if (output.cblk == 0) {
-        ALOGE("%s(%d): Could not get control block", __func__, mId);
+        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
     iMemPointer = output.cblk ->pointer();
     if (iMemPointer == NULL) {
-        ALOGE("%s(%d): Could not get control block pointer", __func__, mId);
+        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -786,7 +786,7 @@
     } else {
         buffers = output.buffers->pointer();
         if (buffers == NULL) {
-            ALOGE("%s(%d): Could not get buffer pointer", __func__, mId);
+            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
             status = NO_INIT;
             goto exit;
         }
@@ -800,14 +800,14 @@
     mAudioRecord = record;
     mCblkMemory = output.cblk;
     mBufferMemory = output.buffers;
-    mId = output.trackId;
+    mPortId = output.portId;
     IPCThreadState::self()->flushCommands();
 
     mCblk = cblk;
     // note that output.frameCount is the (possibly revised) value of mReqFrameCount
     if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
         ALOGW("%s(%d): Requested frameCount %zu but received frameCount %zu",
-              __func__, mId,
+              __func__, mPortId,
               mReqFrameCount,  output.frameCount);
     }
 
@@ -815,7 +815,7 @@
     // The computation is done on server side.
     if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
         ALOGW("%s(%d): Server adjusted notificationFrames from %u to %zu for frameCount %zu",
-                __func__, mId,
+                __func__, mPortId,
                 mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
     }
     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
@@ -883,7 +883,7 @@
         timeout.tv_nsec = (long) (ms % 1000) * 1000000;
         requested = &timeout;
     } else {
-        ALOGE("%s(%d): invalid waitCount %d", __func__, mId, waitCount);
+        ALOGE("%s(%d): invalid waitCount %d", __func__, mPortId, waitCount);
         requested = NULL;
     }
     return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
@@ -993,7 +993,7 @@
         // sanity-check. user is most-likely passing an error code, and it would
         // make the return value ambiguous (actualSize vs error).
         ALOGE("%s(%d) (buffer=%p, size=%zu (%zu)",
-                __func__, mId, buffer, userSize, userSize);
+                __func__, mPortId, buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
@@ -1050,7 +1050,7 @@
             pollUs <<= 1;
         } while (tryCounter-- > 0);
         if (tryCounter < 0) {
-            ALOGE("%s(%d): did not receive expected priority boost on time", __func__, mId);
+            ALOGE("%s(%d): did not receive expected priority boost on time", __func__, mPortId);
         }
         // Run again immediately
         return 0;
@@ -1174,7 +1174,7 @@
         timeout.tv_sec = ns / 1000000000LL;
         timeout.tv_nsec = ns % 1000000000LL;
         ALOGV("%s(%d): timeout %ld.%03d",
-                __func__, mId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
+                __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
         requested = &timeout;
     }
 
@@ -1187,17 +1187,17 @@
         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                 "%s(%d): obtainBuffer() err=%d frameCount=%zu",
-                __func__, mId, err, audioBuffer.frameCount);
+                __func__, mPortId, err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
         ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
-                __func__, mId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
+                __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
                 break;
             }
             ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
-                    __func__, mId, err);
+                    __func__, mPortId, err);
             return NS_NEVER;
         }
 
@@ -1220,7 +1220,7 @@
         // Sanity check on returned size
         if (ssize_t(readSize) < 0 || readSize > reqSize) {
             ALOGE("%s(%d):  EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
-                    __func__, mId, reqSize, ssize_t(readSize));
+                    __func__, mPortId, reqSize, ssize_t(readSize));
             return NS_NEVER;
         }
 
@@ -1280,7 +1280,7 @@
 
 status_t AudioRecord::restoreRecord_l(const char *from)
 {
-    ALOGW("%s(%d): dead IAudioRecord, creating a new one from %s()", __func__, mId, from);
+    ALOGW("%s(%d): dead IAudioRecord, creating a new one from %s()", __func__, mPortId, from);
     ++mSequence;
 
     const int INITIAL_RETRIES = 3;
@@ -1311,7 +1311,7 @@
     }
 
     if (result != NO_ERROR) {
-        ALOGW("%s(%d): failed status %d, retries %d", __func__, mId, result, retries);
+        ALOGW("%s(%d): failed status %d, retries %d", __func__, mPortId, result, retries);
         if (--retries > 0) {
             // leave time for an eventual race condition to clear before retrying
             usleep(500000);
@@ -1330,18 +1330,18 @@
 status_t AudioRecord::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
 {
     if (callback == 0) {
-        ALOGW("%s(%d): adding NULL callback!", __func__, mId);
+        ALOGW("%s(%d): adding NULL callback!", __func__, mPortId);
         return BAD_VALUE;
     }
     AutoMutex lock(mLock);
     if (mDeviceCallback.unsafe_get() == callback.get()) {
-        ALOGW("%s(%d): adding same callback!", __func__, mId);
+        ALOGW("%s(%d): adding same callback!", __func__, mPortId);
         return INVALID_OPERATION;
     }
     status_t status = NO_ERROR;
     if (mInput != AUDIO_IO_HANDLE_NONE) {
         if (mDeviceCallback != 0) {
-            ALOGW("%s(%d): callback already present!", __func__, mId);
+            ALOGW("%s(%d): callback already present!", __func__, mPortId);
             AudioSystem::removeAudioDeviceCallback(this, mInput);
         }
         status = AudioSystem::addAudioDeviceCallback(this, mInput);
@@ -1354,12 +1354,12 @@
         const sp<AudioSystem::AudioDeviceCallback>& callback)
 {
     if (callback == 0) {
-        ALOGW("%s(%d): removing NULL callback!", __func__, mId);
+        ALOGW("%s(%d): removing NULL callback!", __func__, mPortId);
         return BAD_VALUE;
     }
     AutoMutex lock(mLock);
     if (mDeviceCallback.unsafe_get() != callback.get()) {
-        ALOGW("%s(%d): removing different callback!", __func__, mId);
+        ALOGW("%s(%d): removing different callback!", __func__, mPortId);
         return INVALID_OPERATION;
     }
     mDeviceCallback.clear();
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index ec36ed7..3e91717 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -917,11 +917,11 @@
             config, flags, selectedDeviceId, portId);
 }
 
-status_t AudioSystem::startInput(audio_port_handle_t portId, bool *silenced)
+status_t AudioSystem::startInput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startInput(portId, silenced);
+    return aps->startInput(portId);
 }
 
 status_t AudioSystem::stopInput(audio_port_handle_t portId)
@@ -1315,6 +1315,13 @@
     return aps->setA11yServicesUids(uids);
 }
 
+bool AudioSystem::isHapticPlaybackSupported()
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+    return aps->isHapticPlaybackSupported();
+}
+
 
 // ---------------------------------------------------------------------------
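
Like the other AudioSystem entry points in this file, isHapticPlaybackSupported() forwards to the audio policy service and falls back to a safe default (false) when the service is unavailable. A hypothetical caller, shown only to illustrate the intended use; nothing below is part of the patch:

    #include <media/AudioSystem.h>

    // Hypothetical: gate haptic-specific setup on platform support.
    static bool shouldKeepHapticChannels() {
        // false covers both "policy says unsupported" and "policy service unreachable",
        // which is the conservative answer for a caller deciding whether to keep
        // haptic channels in a stream.
        return android::AudioSystem::isHapticPlaybackSupported();
    }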
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index df9aea6..1f6dd60 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -30,9 +30,11 @@
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
 #include <media/IAudioFlinger.h>
+#include <media/IAudioPolicyService.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicyHelper.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/AudioSystem.h>
 #include <media/MediaAnalyticsItem.h>
 #include <media/TypeConverter.h>
 
@@ -64,7 +66,7 @@
 
 static int64_t convertTimespecToUs(const struct timespec &tv)
 {
-    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
+    return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
 }
 
 // TODO move to audio_utils.
@@ -157,6 +159,15 @@
     return NO_ERROR;
 }
 
+// static
+bool AudioTrack::isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) {
+    ALOGV("%s()", __FUNCTION__);
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+    return aps->isDirectOutputSupported(config, attributes);
+}
+
 // ---------------------------------------------------------------------------
 
 static std::string audioContentTypeString(audio_content_type_t value) {
@@ -344,7 +355,7 @@
         mSharedBuffer.clear();
         IPCThreadState::self()->flushCommands();
         ALOGV("%s(%d), releasing session id %d from %d on behalf of %d",
-                __func__, mId,
+                __func__, mPortId,
                 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
         AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
     }
@@ -377,7 +388,7 @@
     pid_t callingPid;
     pid_t myPid;
 
-    // Note mId is not valid until the track is created, so omit mId in ALOG for set.
+    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
           __func__,
@@ -465,16 +476,7 @@
                 __func__,
                  mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
         mStreamType = AUDIO_STREAM_DEFAULT;
-        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
-            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
-        }
-        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
-            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
-        }
-        // check deep buffer after flags have been modified above
-        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
-            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-        }
+        audio_attributes_flags_to_audio_output_flags(mAttributes.flags, flags);
     }
 
     // these below should probably come from the audioFlinger too...
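
The inline flag translation that used to live in this branch is now delegated to audio_attributes_flags_to_audio_output_flags(). That helper's body is not part of this hunk; the sketch below simply reconstructs the mapping from the deleted lines so the behavioural intent stays visible, and it should not be read as the real implementation:

    // Reconstructed from the removed code above, for reference only.
    static void attrFlagsToOutputFlags(audio_flags_mask_t attrFlags, audio_output_flags_t &flags) {
        if ((attrFlags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((attrFlags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // Deep buffer is only chosen when no other output flag has been requested.
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (attrFlags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }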
@@ -658,7 +660,7 @@
 status_t AudioTrack::start()
 {
     AutoMutex lock(mLock);
-    ALOGV("%s(%d): prior state:%s", __func__, mId, stateToString(mState));
+    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
 
     if (mState == STATE_ACTIVE) {
         return INVALID_OPERATION;
@@ -699,7 +701,7 @@
             // It is possible since flush and stop are asynchronous that the server
             // is still active at this point.
             ALOGV("%s(%d): server read:%lld  cumulative flushed:%lld  client written:%lld",
-                    __func__, mId,
+                    __func__, mPortId,
                     (long long)(mFramesWrittenServerOffset
                             + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                     (long long)mStartEts.mFlushed,
@@ -760,7 +762,7 @@
         // Start our local VolumeHandler for restoration purposes.
         mVolumeHandler->setStarted();
     } else {
-        ALOGE("%s(%d): status %d", __func__, mId, status);
+        ALOGE("%s(%d): status %d", __func__, mPortId, status);
         mState = previousState;
         if (t != 0) {
             if (previousState != STATE_STOPPING) {
@@ -778,7 +780,7 @@
 void AudioTrack::stop()
 {
     AutoMutex lock(mLock);
-    ALOGV("%s(%d): prior state:%s", __func__, mId, stateToString(mState));
+    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
 
     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
         return;
@@ -789,7 +791,7 @@
     } else {
         mState = STATE_STOPPED;
         ALOGD_IF(mSharedBuffer == nullptr,
-                "%s(%d): called with %u frames delivered", __func__, mId, mReleased.value());
+                "%s(%d): called with %u frames delivered", __func__, mPortId, mReleased.value());
         mReleased = 0;
     }
 
@@ -830,7 +832,7 @@
 void AudioTrack::flush()
 {
     AutoMutex lock(mLock);
-    ALOGV("%s(%d): prior state:%s", __func__, mId, stateToString(mState));
+    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
 
     if (mSharedBuffer != 0) {
         return;
@@ -863,7 +865,7 @@
 void AudioTrack::pause()
 {
     AutoMutex lock(mLock);
-    ALOGV("%s(%d): prior state:%s", __func__, mId, stateToString(mState));
+    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
 
     if (mState == STATE_ACTIVE) {
         mState = STATE_PAUSED;
@@ -891,7 +893,7 @@
             uint32_t halFrames;
             AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
             ALOGV("%s(%d): for offload, cache current position %u",
-                    __func__, mId, mPausedPosition);
+                    __func__, mPortId, mPausedPosition);
         }
     }
 }
@@ -945,7 +947,7 @@
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
     AutoMutex lock(mLock);
-    ALOGV("%s(%d): prior state:%s rate:%u", __func__, mId, stateToString(mState), rate);
+    ALOGV("%s(%d): prior state:%s rate:%u", __func__, mPortId, stateToString(mState), rate);
 
     if (rate == mSampleRate) {
         return NO_ERROR;
@@ -1013,7 +1015,7 @@
     }
 
     ALOGV("%s(%d): mSampleRate:%u  mSpeed:%f  mPitch:%f",
-            __func__, mId, mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
+            __func__, mPortId, mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
     // pitch is emulated by adjusting speed and sampleRate
     const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
     const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
@@ -1023,17 +1025,17 @@
     playbackRateTemp.mPitch = effectivePitch;
 
     ALOGV("%s(%d) (effective) mSampleRate:%u  mSpeed:%f  mPitch:%f",
-            __func__, mId, effectiveRate, effectiveSpeed, effectivePitch);
+            __func__, mPortId, effectiveRate, effectiveSpeed, effectivePitch);
 
     if (!isAudioPlaybackRateValid(playbackRateTemp)) {
         ALOGW("%s(%d) (%f, %f) failed (effective rate out of bounds)",
-                __func__, mId, playbackRate.mSpeed, playbackRate.mPitch);
+                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
     // Check if the buffer size is compatible.
     if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
         ALOGW("%s(%d) (%f, %f) failed (buffer size)",
-                __func__, mId, playbackRate.mSpeed, playbackRate.mPitch);
+                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
 
@@ -1041,13 +1043,13 @@
     if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
             (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
         ALOGW("%s(%d) (%f, %f) failed. Resample rate exceeds max accepted value",
-                __func__, mId, playbackRate.mSpeed, playbackRate.mPitch);
+                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
 
     if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
         ALOGW("%s(%d) (%f, %f) failed. Resample rate below min accepted value",
-                __func__, mId, playbackRate.mSpeed, playbackRate.mPitch);
+                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
     mPlaybackRate = playbackRate;
@@ -1249,7 +1251,7 @@
 
         if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
             ALOGV("%s(%d): called in paused state, return cached position %u",
-                __func__, mId, mPausedPosition);
+                __func__, mPortId, mPausedPosition);
             *position = mPausedPosition;
             return NO_ERROR;
         }
@@ -1395,7 +1397,7 @@
 {
     status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
     if (status != NO_ERROR) {
-        ALOGW("%s(%d): getLatency(%d) failed status %d", __func__, mId, mOutput, status);
+        ALOGW("%s(%d): getLatency(%d) failed status %d", __func__, mPortId, mOutput, status);
     } else {
         // FIXME don't believe this lie
         mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
@@ -1425,7 +1427,7 @@
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
         ALOGE("%s(%d): Could not get audioflinger",
-                __func__, mId);
+                __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -1451,7 +1453,7 @@
         if (!fastAllowed) {
             ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
                   " not shared buffer and transfer = %s",
-                  __func__, mId,
+                  __func__, mPortId,
                   convertTransferToText(mTransfer));
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
         }
@@ -1501,7 +1503,7 @@
 
     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
         ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
-                __func__, mId, status, output.outputId);
+                __func__, mPortId, status, output.outputId);
         if (status == NO_ERROR) {
             status = NO_INIT;
         }
@@ -1522,7 +1524,7 @@
     mAfFrameCount = output.afFrameCount;
     mAfSampleRate = output.afSampleRate;
     mAfLatency = output.afLatencyMs;
-    mId = output.trackId;
+    mPortId = output.portId;
 
     mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
 
@@ -1532,13 +1534,13 @@
     // FIXME compare to AudioRecord
     sp<IMemory> iMem = track->getCblk();
     if (iMem == 0) {
-        ALOGE("%s(%d): Could not get control block", __func__, mId);
+        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
     void *iMemPointer = iMem->pointer();
     if (iMemPointer == NULL) {
-        ALOGE("%s(%d): Could not get control block pointer", __func__, mId);
+        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -1558,13 +1560,13 @@
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
             ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
-                  __func__, mId, mReqFrameCount, mFrameCount);
+                  __func__, mPortId, mReqFrameCount, mFrameCount);
             if (!mThreadCanCallJava) {
                 mAwaitBoost = true;
             }
         } else {
             ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
-                  __func__, mId, mReqFrameCount, mFrameCount);
+                  __func__, mPortId, mReqFrameCount, mFrameCount);
         }
     }
     mFlags = output.flags;
@@ -1592,7 +1594,7 @@
     } else {
         buffers = mSharedBuffer->pointer();
         if (buffers == NULL) {
-            ALOGE("%s(%d): Could not get buffer pointer", __func__, mId);
+            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
             status = NO_INIT;
             goto exit;
         }
@@ -1681,7 +1683,7 @@
         timeout.tv_nsec = (long) (ms % 1000) * 1000000;
         requested = &timeout;
     } else {
-        ALOGE("%s(%d): invalid waitCount %d", __func__, mId, waitCount);
+        ALOGE("%s(%d): invalid waitCount %d", __func__, mPortId, waitCount);
         requested = NULL;
     }
     return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
@@ -1792,7 +1794,7 @@
     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
     if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
         ALOGW("%s(%d): releaseBuffer() track %p disabled due to previous underrun, restarting",
-                __func__, mId, this);
+                __func__, mPortId, this);
         // FIXME ignoring status
         mAudioTrack->start();
     }
@@ -1820,7 +1822,7 @@
         // Sanity-check: user is most-likely passing an error code, and it would
         // make the return value ambiguous (actualSize vs error).
         ALOGE("%s(%d): AudioTrack::write(buffer=%p, size=%zu (%zd)",
-                __func__, mId, buffer, userSize, userSize);
+                __func__, mPortId, buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
@@ -1893,7 +1895,7 @@
         } while (tryCounter-- > 0);
         if (tryCounter < 0) {
             ALOGE("%s(%d): did not receive expected priority boost on time",
-                    __func__, mId);
+                    __func__, mPortId);
         }
         // Run again immediately
         return 0;
@@ -2135,7 +2137,7 @@
         timeout.tv_sec = ns / 1000000000LL;
         timeout.tv_nsec = ns % 1000000000LL;
         ALOGV("%s(%d): timeout %ld.%03d",
-                __func__, mId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
+                __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
         requested = &timeout;
     }
 
@@ -2148,11 +2150,11 @@
         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                 "%s(%d): obtainBuffer() err=%d frameCount=%zu",
-                 __func__, mId, err, audioBuffer.frameCount);
+                 __func__, mPortId, err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
         ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
-                __func__, mId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
+                __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                     (isOffloaded() && (err == DEAD_OBJECT))) {
@@ -2160,7 +2162,7 @@
                 return 1000000;
             }
             ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
-                    __func__, mId, err);
+                    __func__, mPortId, err);
             return NS_NEVER;
         }
 
@@ -2192,7 +2194,7 @@
         // Sanity check on returned size
         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
             ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
-                    __func__, mId, reqSize, ssize_t(writtenSize));
+                    __func__, mPortId, reqSize, ssize_t(writtenSize));
             return NS_NEVER;
         }
 
@@ -2296,7 +2298,7 @@
 status_t AudioTrack::restoreTrack_l(const char *from)
 {
     ALOGW("%s(%d): dead IAudioTrack, %s, creating a new one from %s()",
-            __func__, mId, isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
+            __func__, mPortId, isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
     ++mSequence;
 
     // refresh the audio configuration cache in this process to make sure we get new
@@ -2354,7 +2356,7 @@
             } else {
                 mStaticProxy->setBufferPosition(bufferPosition);
                 if (bufferPosition == mFrameCount) {
-                    ALOGD("%s(%d): restoring track at end of static buffer", __func__, mId);
+                    ALOGD("%s(%d): restoring track at end of static buffer", __func__, mPortId);
                 }
             }
         }
@@ -2384,7 +2386,7 @@
         mFramesWrittenAtRestore = mFramesWrittenServerOffset;
     }
     if (result != NO_ERROR) {
-        ALOGW("%s(%d): failed status %d, retries %d", __func__, mId, result, retries);
+        ALOGW("%s(%d): failed status %d, retries %d", __func__, mPortId, result, retries);
         if (--retries > 0) {
             // leave time for an eventual race condition to clear before retrying
             usleep(500000);
@@ -2414,7 +2416,7 @@
     //      in which case the use of uint32_t for these counters has bigger issues.
     ALOGE_IF(delta < 0,
             "%s(%d): detected illegal retrograde motion by the server: mServer advanced by %d",
-            __func__, mId, delta);
+            __func__, mPortId, delta);
     mServer = newServer;
     if (delta > 0) { // avoid retrograde
         mPosition += delta;
@@ -2437,7 +2439,7 @@
             "%s(%d): denied "
             "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
             "mFrameCount:%zu < minFrameCount:%zu",
-            __func__, mId,
+            __func__, mPortId,
             mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
             mFrameCount, minFrameCount);
     return allowed;
@@ -2456,7 +2458,7 @@
     param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
     param.addInt(String8(AudioParameter::keyProgramId), programId);
     ALOGV("%s(%d): PresentationId/ProgramId[%s]",
-            __func__, mId, param.toString().string());
+            __func__, mPortId, param.toString().string());
 
     return mAudioTrack->setParameters(param.toString());
 }
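
The hunk above composes the presentation and program selection as AudioParameter key/value pairs (keyPresentationId / keyProgramId) and forwards the serialized string through IAudioTrack::setParameters(). A minimal sketch of composing the same string from the caller's side, assuming only the AudioParameter API already visible in the hunk and purely illustrative ID values:

    #include <media/AudioParameter.h>

    using android::AudioParameter;
    using android::String8;

    // Hypothetical IDs; real values would come from the selected presentation.
    static String8 buildPresentationParams(int presentationId, int programId) {
        AudioParameter param;
        // Same keys the change uses above.
        param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
        param.addInt(String8(AudioParameter::keyProgramId), programId);
        // Serialized key/value string handed to setParameters().
        return param.toString();
    }
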
@@ -2483,7 +2485,7 @@
     } else {
         // warn only if not an expected restore failure.
         ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
-                "%s(%d): applyVolumeShaper failed: %d", __func__, mId, status);
+                "%s(%d): applyVolumeShaper failed: %d", __func__, mPortId, status);
     }
     return status;
 }
@@ -2525,7 +2527,7 @@
     }
     status_t status = mProxy->getTimestamp(timestamp);
     LOG_ALWAYS_FATAL_IF(status != OK, "%s(%d): status %d not allowed from proxy getTimestamp",
-            __func__, mId, status);
+            __func__, mPortId, status);
     bool found = false;
     timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
     timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
@@ -2569,7 +2571,7 @@
         break; // offloaded tracks handled below
     default:
         LOG_ALWAYS_FATAL("%s(%d): Invalid mState in getTimestamp(): %d",
-               __func__, mId, mState);
+               __func__, mPortId, mState);
         break;
     }
 
@@ -2604,7 +2606,7 @@
                 if (location == ExtendedTimestamp::LOCATION_SERVER) {
                     ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
                             "%s(%d): location moved from kernel to server",
-                            __func__, mId);
+                            __func__, mPortId);
                     // check that the last kernel OK time info exists and the positions
                     // are valid (if they predate the current track, the positions may
                     // be zero or negative).
@@ -2620,7 +2622,7 @@
                             (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
                     ALOGV("%s(%d): frame adjustment:%lld  timestamp:%s",
-                            __func__, mId, (long long)frames, ets.toString().c_str());
+                            __func__, mPortId, (long long)frames, ets.toString().c_str());
                     if (frames >= ets.mPosition[location]) {
                         timestamp.mPosition = 0;
                     } else {
@@ -2629,7 +2631,7 @@
                 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
                     ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
                             "%s(%d): location moved from server to kernel",
-                            __func__, mId);
+                            __func__, mPortId);
                 }
 
                 // We update the timestamp time even when paused.
@@ -2653,7 +2655,7 @@
                 mPreviousLocation = location;
             } else {
                 // right after AudioTrack is started, one may not find a timestamp
-                ALOGV("%s(%d): getBestTimestamp did not find timestamp", __func__, mId);
+                ALOGV("%s(%d): getBestTimestamp did not find timestamp", __func__, mPortId);
             }
         }
         if (status == INVALID_OPERATION) {
@@ -2664,7 +2666,7 @@
             // "zero" for NuPlayer).  We don't convert for track restoration as position
             // does not reset.
             ALOGV("%s(%d): timestamp server offset:%lld restore frames:%lld",
-                    __func__, mId,
+                    __func__, mPortId,
                     (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
             if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
                 status = WOULD_BLOCK;
@@ -2672,7 +2674,7 @@
         }
     }
     if (status != NO_ERROR) {
-        ALOGV_IF(status != WOULD_BLOCK, "%s(%d): getTimestamp error:%#x", __func__, mId, status);
+        ALOGV_IF(status != WOULD_BLOCK, "%s(%d): getTimestamp error:%#x", __func__, mPortId, status);
         return status;
     }
     if (isOffloadedOrDirect_l()) {
@@ -2713,7 +2715,7 @@
                     ALOGW_IF(!mTimestampStartupGlitchReported,
                             "%s(%d): startup glitch detected"
                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
-                            __func__, mId,
+                            __func__, mPortId,
                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
                             timestamp.mPosition);
                     mTimestampStartupGlitchReported = true;
@@ -2783,7 +2785,7 @@
             if (currentTimeNanos < limitNs) {
                 ALOGD("%s(%d): correcting timestamp time for pause, "
                         "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
-                        __func__, mId,
+                        __func__, mPortId,
                         (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
                 timestamp.mTime = convertNsToTimespec(limitNs);
                 currentTimeNanos = limitNs;
@@ -2792,7 +2794,7 @@
             // retrograde check
             if (currentTimeNanos < previousTimeNanos) {
                 ALOGW("%s(%d): retrograde timestamp time corrected, %lld < %lld",
-                        __func__, mId,
+                        __func__, mPortId,
                         (long long)currentTimeNanos, (long long)previousTimeNanos);
                 timestamp.mTime = mPreviousTimestamp.mTime;
                 // currentTimeNanos not used below.
@@ -2806,7 +2808,7 @@
                 // Only report once per position instead of spamming the log.
                 if (!mRetrogradeMotionReported) {
                     ALOGW("%s(%d): retrograde timestamp position corrected, %d = %u - %u",
-                            __func__, mId,
+                            __func__, mPortId,
                             deltaPosition,
                             timestamp.mPosition,
                             mPreviousTimestamp.mPosition);
@@ -2827,7 +2829,7 @@
                 const int64_t computedSampleRate =
                         deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
                 ALOGD("%s(%d): computedSampleRate:%u  sampleRate:%u",
-                        __func__, mId,
+                        __func__, mPortId,
                         (unsigned)computedSampleRate, mSampleRate);
             }
 #endif
@@ -2874,7 +2876,7 @@
 
     result.append(" AudioTrack::dump\n");
     result.appendFormat("  id(%d) status(%d), state(%d), session Id(%d), flags(%#x)\n",
-                        mId, mStatus, mState, mSessionId, mFlags);
+                        mPortId, mStatus, mState, mSessionId, mFlags);
     result.appendFormat("  stream type(%d), left - right volume(%f, %f)\n",
                         (mStreamType == AUDIO_STREAM_DEFAULT) ?
                                 audio_attributes_to_stream_type(&mAttributes) : mStreamType,
@@ -2916,18 +2918,18 @@
 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
 {
     if (callback == 0) {
-        ALOGW("%s(%d): adding NULL callback!", __func__, mId);
+        ALOGW("%s(%d): adding NULL callback!", __func__, mPortId);
         return BAD_VALUE;
     }
     AutoMutex lock(mLock);
     if (mDeviceCallback.unsafe_get() == callback.get()) {
-        ALOGW("%s(%d): adding same callback!", __func__, mId);
+        ALOGW("%s(%d): adding same callback!", __func__, mPortId);
         return INVALID_OPERATION;
     }
     status_t status = NO_ERROR;
     if (mOutput != AUDIO_IO_HANDLE_NONE) {
         if (mDeviceCallback != 0) {
-            ALOGW("%s(%d): callback already present!", __func__, mId);
+            ALOGW("%s(%d): callback already present!", __func__, mPortId);
             AudioSystem::removeAudioDeviceCallback(this, mOutput);
         }
         status = AudioSystem::addAudioDeviceCallback(this, mOutput);
@@ -2940,12 +2942,12 @@
         const sp<AudioSystem::AudioDeviceCallback>& callback)
 {
     if (callback == 0) {
-        ALOGW("%s(%d): removing NULL callback!", __func__, mId);
+        ALOGW("%s(%d): removing NULL callback!", __func__, mPortId);
         return BAD_VALUE;
     }
     AutoMutex lock(mLock);
     if (mDeviceCallback.unsafe_get() != callback.get()) {
-        ALOGW("%s(%d): removing different callback!", __func__, mId);
+        ALOGW("%s(%d): removing different callback!", __func__, mPortId);
         return INVALID_OPERATION;
     }
     mDeviceCallback.clear();
@@ -3051,7 +3053,7 @@
     case STATE_FLUSHED:
         return false;  // we're not active
     default:
-        LOG_ALWAYS_FATAL("%s(%d): Invalid mState in hasStarted(): %d", __func__, mId, mState);
+        LOG_ALWAYS_FATAL("%s(%d): Invalid mState in hasStarted(): %d", __func__, mPortId, mState);
         break;
     }
 
@@ -3068,7 +3070,7 @@
             wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
         }
         ALOGV("%s(%d): hasStarted wait:%d  ts:%u  start position:%lld",
-                __func__, mId,
+                __func__, mPortId,
                 (int)wait,
                 ts.mPosition,
                 (long long)mStartTs.mPosition);
@@ -3090,7 +3092,7 @@
             }
         }
         ALOGV("%s(%d): hasStarted wait:%d  ets:%lld  start position:%lld",
-                __func__, mId,
+                __func__, mPortId,
                 (int)wait,
                 (long long)ets.mPosition[location],
                 (long long)mStartEts.mPosition[location]);
@@ -3166,7 +3168,7 @@
         FALLTHROUGH_INTENDED;
     default:
         LOG_ALWAYS_FATAL_IF(ns < 0, "%s(%d): processAudioBuffer() returned %lld",
-                __func__, mReceiver.mId, (long long)ns);
+                __func__, mReceiver.mPortId, (long long)ns);
         pauseInternal(ns);
         return true;
     }
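
Since every log line in this file now prints the audio policy port ID instead of the AudioFlinger track ID, a client can correlate its own traces with these logs through the getPortId() accessor added to AudioTrack.h (and, analogously, AudioRecord.h) later in this change. A minimal sketch, assuming an already-initialized track:

    #define LOG_TAG "AudioTrackPortIdExample"

    #include <media/AudioTrack.h>
    #include <utils/Log.h>

    // Logs the same identifier that the ALOGx statements above now print,
    // so app-side traces can be matched against libaudioclient logs.
    void logTrackPortId(const android::sp<android::AudioTrack>& track) {
        const audio_port_handle_t portId = track->getPortId();
        ALOGD("client-side AudioTrack portId:%d", portId);
    }
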
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index a406658..0ce8b16 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -62,6 +62,7 @@
     SET_EFFECT_ENABLED,
     IS_STREAM_ACTIVE_REMOTELY,
     IS_OFFLOAD_SUPPORTED,
+    IS_DIRECT_OUTPUT_SUPPORTED,
     LIST_AUDIO_PORTS,
     GET_AUDIO_PORT,
     CREATE_AUDIO_PATCH,
@@ -88,6 +89,7 @@
     REMOVE_SOURCE_DEFAULT_EFFECT,
     SET_ASSISTANT_UID,
     SET_A11Y_SERVICES_UIDS,
+    IS_HAPTIC_PLAYBACK_SUPPORTED,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -329,16 +331,13 @@
         return NO_ERROR;
     }
 
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool *silenced)
+    virtual status_t startInput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(portId);
-        data.writeInt32(*silenced ? 1 : 0);
         remote()->transact(START_INPUT, data, &reply);
         status_t status = static_cast <status_t> (reply.readInt32());
-        *silenced = reply.readInt32() == 1;
         return status;
     }
 
@@ -526,6 +525,16 @@
         return reply.readInt32();
     }
 
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(&config, sizeof(audio_config_base_t));
+        data.write(&attributes, sizeof(audio_attributes_t));
+        status_t status = remote()->transact(IS_DIRECT_OUTPUT_SUPPORTED, data, &reply);
+        return status == NO_ERROR ? static_cast<bool>(reply.readInt32()) : false;
+    }
+
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
                                     unsigned int *num_ports,
@@ -970,6 +979,17 @@
         return static_cast <status_t> (reply.readInt32());
     }
 
+    virtual bool isHapticPlaybackSupported()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = remote()->transact(IS_HAPTIC_PLAYBACK_SUPPORTED, data, &reply);
+        if (status != NO_ERROR) {
+            return false;
+        }
+        return reply.readBool();
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1219,10 +1239,8 @@
         case START_INPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
-            bool silenced = data.readInt32() == 1;
-            status_t status = startInput(portId, &silenced);
+            status_t status = startInput(portId);
             reply->writeInt32(static_cast <uint32_t>(status));
-            reply->writeInt32(silenced ? 1 : 0);
             return NO_ERROR;
         } break;
 
@@ -1393,6 +1411,18 @@
             return NO_ERROR;
         }
 
+        case IS_DIRECT_OUTPUT_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_config_base_t config = {};
+            audio_attributes_t attributes = {};
+            status_t status = data.read(&config, sizeof(audio_config_base_t));
+            if (status != NO_ERROR) return status;
+            status = data.read(&attributes, sizeof(audio_attributes_t));
+            if (status != NO_ERROR) return status;
+            reply->writeInt32(isDirectOutputSupported(config, attributes));
+            return NO_ERROR;
+        }
+
         case LIST_AUDIO_PORTS: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_port_role_t role = (audio_port_role_t)data.readInt32();
@@ -1777,6 +1807,13 @@
             return NO_ERROR;
         }
 
+        case IS_HAPTIC_PLAYBACK_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            bool isSupported = isHapticPlaybackSupported();
+            reply->writeBool(isSupported);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
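
Both new transactions follow the usual Bp/Bn pattern: the proxy writes the interface token plus any POD arguments, and the stub replies with a single int32/bool. From a client process the haptic query is reached through AudioSystem, whose header gains the matching static later in this change; a minimal usage sketch:

    #include <media/AudioSystem.h>

    // Returns whether the audio HAL reports haptic playback support.
    // The call goes through BpAudioPolicyService::isHapticPlaybackSupported()
    // added above and degrades to 'false' on any binder error.
    bool platformSupportsHapticPlayback() {
        return android::AudioSystem::isHapticPlaybackSupported();
    }
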
diff --git a/media/libaudioclient/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
index adff057..83a568a 100644
--- a/media/libaudioclient/IAudioTrack.cpp
+++ b/media/libaudioclient/IAudioTrack.cpp
@@ -39,6 +39,7 @@
     PAUSE,
     ATTACH_AUX_EFFECT,
     SET_PARAMETERS,
+    SELECT_PRESENTATION,
     GET_TIMESTAMP,
     SIGNAL,
     APPLY_VOLUME_SHAPER,
@@ -127,6 +128,19 @@
         return status;
     }
 
+    /* Selects the presentation (if available) */
+    virtual status_t selectPresentation(int presentationId, int programId) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32(presentationId);
+        data.writeInt32(programId);
+        status_t status = remote()->transact(SELECT_PRESENTATION, data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
+
     virtual status_t getTimestamp(AudioTimestamp& timestamp) {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
@@ -239,6 +253,11 @@
             reply->writeInt32(setParameters(keyValuePairs));
             return NO_ERROR;
         } break;
+        case SELECT_PRESENTATION: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            reply->writeInt32(selectPresentation(data.readInt32(), data.readInt32()));
+            return NO_ERROR;
+        } break;
         case GET_TIMESTAMP: {
             CHECK_INTERFACE(IAudioTrack, data, reply);
             AudioTimestamp timestamp;
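
One caveat about the SELECT_PRESENTATION stub above: C++ leaves the evaluation order of function arguments unspecified, so the two data.readInt32() calls inside the selectPresentation(...) invocation are not guaranteed to map presentationId and programId in the order the proxy wrote them. A defensive version of that case label (a sketch, not what the change itself does) reads into named locals first:

        case SELECT_PRESENTATION: {
            CHECK_INTERFACE(IAudioTrack, data, reply);
            // Read in a well-defined order before calling the implementation.
            const int presentationId = data.readInt32();
            const int programId = data.readInt32();
            reply->writeInt32(selectPresentation(presentationId, programId));
            return NO_ERROR;
        } break;
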
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index aa036a8..3ae7104 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -78,6 +78,8 @@
         DOWNMIX_TYPE    = 0X4004,
         MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
         MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
+        // for haptic
+        HAPTIC_ENABLED  = 0x4007, // Set whether haptic data from this track should be played or not.
         // for target RESAMPLE
         SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
                                   // parameter 'value' is the new sample rate in Hz.
@@ -137,6 +139,13 @@
     void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
 
     void        process() {
+        for (const auto &pair : mTracks) {
+            // Clear the contracted buffer before processing if contracted channels are being kept
+            const std::shared_ptr<Track> &t = pair.second;
+            if (t->mKeepContractedChannels) {
+                t->clearContractedBuffer();
+            }
+        }
         (this->*mHook)();
     }
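
The new HAPTIC_ENABLED track parameter and the per-track clearContractedBuffer() pass in process() are the mixer-side half of haptic support. A sketch of how a mixer client might toggle haptic playback for a track, assuming AudioMixer's existing setParameter(name, target, param, value) entry point and the boolean-passed-as-pointer convention used by other track parameters; the actual AudioFlinger call site may differ:

    #include <cstdint>
    #include <media/AudioMixer.h>

    // Enable or disable playback of the haptic channels carried by track 'name'.
    void setTrackHapticEnabled(android::AudioMixer* mixer, int name, bool enabled) {
        mixer->setParameter(name, android::AudioMixer::TRACK,
                            android::AudioMixer::HAPTIC_ENABLED,
                            (void *)(uintptr_t)enabled);
    }
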
 
@@ -235,6 +244,8 @@
             mPostDownmixReformatBufferProvider.reset(nullptr);
             mDownmixerBufferProvider.reset(nullptr);
             mReformatBufferProvider.reset(nullptr);
+            mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+            mAdjustChannelsBufferProvider.reset(nullptr);
         }
 
         bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
@@ -249,6 +260,11 @@
         void        unprepareForDownmix();
         status_t    prepareForReformat();
         void        unprepareForReformat();
+        status_t    prepareForAdjustChannels();
+        void        unprepareForAdjustChannels();
+        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
+        void        unprepareForAdjustChannelsNonDestructive();
+        void        clearContractedBuffer();
         bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
         void        reconfigureBufferProviders();
 
@@ -302,17 +318,22 @@
          * all pre-mixer track buffer conversions outside the AudioMixer class.
          *
          * 1) mInputBufferProvider: The AudioTrack buffer provider.
-         * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
+         * 2) mAdjustChannelsBufferProvider: Expands or contracts channel data
+         * 3) mAdjustChannelsNonDestructiveBufferProvider: Non-destructively adjusts sample data
+         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
          *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
          *    requires reformat. For example, it may convert floating point input to
          *    PCM_16_bit if that's required by the downmixer.
-         * 3) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
          *    the number of channels required by the mixer sink.
-         * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
          *    the downmixer requirements to the mixer engine input requirements.
-         * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
+         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
          */
         AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
+        // TODO: combine AdjustChannelsBufferProvider and AdjustChannelsNonDestructiveBufferProvider
+        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsNonDestructiveBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
@@ -341,6 +362,18 @@
 
         AudioPlaybackRate    mPlaybackRate;
 
+        // Haptic
+        bool                 mHapticPlaybackEnabled;
+        audio_channel_mask_t mHapticChannelMask;
+        uint32_t             mHapticChannelCount;
+        audio_channel_mask_t mMixerHapticChannelMask;
+        uint32_t             mMixerHapticChannelCount;
+        uint32_t             mAdjustInChannelCount;
+        uint32_t             mAdjustOutChannelCount;
+        uint32_t             mAdjustNonDestructiveInChannelCount;
+        uint32_t             mAdjustNonDestructiveOutChannelCount;
+        bool                 mKeepContractedChannels;
+
     private:
         // hooks
         void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
index 49432b7..46de6b3 100644
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -123,4 +123,21 @@
     }
 }
 
+// Convert the flags returned by the Java AudioAttributes.getFlags() method to audio_output_flags_t
+static inline
+void audio_attributes_flags_to_audio_output_flags(const audio_flags_mask_t audioAttributeFlags,
+            audio_output_flags_t &flags) {
+    if ((audioAttributeFlags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+        flags = static_cast<audio_output_flags_t>(flags |
+            AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
+    if ((audioAttributeFlags & AUDIO_FLAG_LOW_LATENCY) != 0) {
+        flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_FAST);
+    }
+    // check deep buffer after flags have been modified above
+    if (flags == AUDIO_OUTPUT_FLAG_NONE && (audioAttributeFlags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
+        flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+    }
+}
+
 #endif //AUDIO_POLICY_HELPER_H_
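
A small usage sketch for the new helper, using only the constants it already references: starting from AUDIO_OUTPUT_FLAG_NONE, the HW A/V sync attribute flag maps to HW_AV_SYNC plus DIRECT, low latency maps to FAST, and deep buffer is only applied when no other output flag was requested.

    #include <media/AudioPolicyHelper.h>
    #include <system/audio.h>

    void exampleFlagConversion() {
        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
        audio_attributes_flags_to_audio_output_flags(AUDIO_FLAG_HW_AV_SYNC, flags);
        // flags == AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_DIRECT

        audio_output_flags_t flags2 = AUDIO_OUTPUT_FLAG_NONE;
        audio_attributes_flags_to_audio_output_flags(AUDIO_FLAG_DEEP_BUFFER, flags2);
        // flags2 == AUDIO_OUTPUT_FLAG_DEEP_BUFFER (nothing else was set first)
    }
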
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index c226557..35a7e05 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -534,9 +534,15 @@
      */
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
-    /*
-     * Dumps the state of an audio record.
-     */
+     /* Get the unique port ID assigned to this AudioRecord instance by audio policy manager.
+      * The ID is unique across all audioserver clients and can change during the life cycle
+      * of a given AudioRecord instance if the connection to audioserver is restored.
+      */
+            audio_port_handle_t getPortId() const { return mPortId; };
+
+     /*
+      * Dumps the state of an audio record.
+      */
             status_t    dump(int fd, const Vector<String16>& args) const;
 
 private:
@@ -654,7 +660,7 @@
     audio_input_flags_t     mOrigFlags;             // as specified in constructor or set(), const
 
     audio_session_t         mSessionId;
-    int                     mId;                    // Id from AudioFlinger
+    audio_port_handle_t     mPortId;                    // Id from Audio Policy Manager
     transfer_type           mTransfer;
 
     // Next 5 fields may be changed if IAudioRecord is re-created, but always != 0
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 76a79c9..74156ca 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -241,8 +241,7 @@
                                     audio_port_handle_t *selectedDeviceId,
                                     audio_port_handle_t *portId);
 
-    static status_t startInput(audio_port_handle_t portId,
-                               bool *silenced);
+    static status_t startInput(audio_port_handle_t portId);
     static status_t stopInput(audio_port_handle_t portId);
     static void releaseInput(audio_port_handle_t portId);
     static status_t initStreamVolume(audio_stream_type_t stream,
@@ -346,6 +345,8 @@
     static status_t setAssistantUid(uid_t uid);
     static status_t setA11yServicesUids(const std::vector<uid_t>& uids);
 
+    static bool     isHapticPlaybackSupported();
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 4b84fd1..7fdf7cc 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -147,6 +147,12 @@
                                      audio_stream_type_t streamType,
                                      uint32_t sampleRate);
 
+    /* Check if direct playback is possible for the given audio configuration and attributes.
+     * Returns true if direct output is possible for the given parameters, false otherwise.
+     */
+    static bool isDirectOutputSupported(const audio_config_base_t& config,
+                                        const audio_attributes_t& attributes);
+
     /* How data is transferred to AudioTrack
      */
     enum transfer_type {
@@ -912,7 +918,14 @@
                 AutoMutex lock(mLock);
                 return mState == STATE_ACTIVE || mState == STATE_STOPPING;
             }
-protected:
+
+    /* Get the unique port ID assigned to this AudioTrack instance by audio policy manager.
+     * The ID is unique across all audioserver clients and can change during the life cycle
+     * of a given AudioTrack instance if the connection to audioserver is restored.
+     */
+            audio_port_handle_t getPortId() const { return mPortId; };
+
+ protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
@@ -1166,7 +1179,7 @@
 
     audio_session_t         mSessionId;
     int                     mAuxEffectId;
-    int                     mId;                    // Id from AudioFlinger.
+    audio_port_handle_t     mPortId;                    // Id from Audio Policy Manager
 
     mutable Mutex           mLock;
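
A usage sketch for the static isDirectOutputSupported() query declared above, filling in an illustrative configuration (the constants are standard system/audio.h values, not anything specific to this change):

    #include <media/AudioTrack.h>
    #include <system/audio.h>

    // Ask audio policy whether this config/attribute pair can be opened as a
    // direct output before committing to an AudioTrack.
    bool canPlayDirect() {
        audio_config_base_t config = {};
        config.sample_rate = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format = AUDIO_FORMAT_PCM_16_BIT;

        audio_attributes_t attr = {};
        attr.usage = AUDIO_USAGE_MEDIA;
        attr.content_type = AUDIO_CONTENT_TYPE_MUSIC;

        return android::AudioTrack::isDirectOutputSupported(config, attr);
    }
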
 
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 52cc860..a34b207 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -146,7 +146,7 @@
             afSampleRate = parcel->readInt64();
             afLatencyMs = parcel->readInt32();
             (void)parcel->read(&outputId, sizeof(audio_io_handle_t));
-            (void)parcel->readInt32(&trackId);
+            (void)parcel->read(&portId, sizeof(audio_port_handle_t));
             return NO_ERROR;
         }
 
@@ -164,7 +164,7 @@
             (void)parcel->writeInt64(afSampleRate);
             (void)parcel->writeInt32(afLatencyMs);
             (void)parcel->write(&outputId, sizeof(audio_io_handle_t));
-            (void)parcel->writeInt32(trackId);
+            (void)parcel->write(&portId, sizeof(audio_port_handle_t));
             return NO_ERROR;
         }
 
@@ -181,7 +181,7 @@
         uint32_t afSampleRate;
         uint32_t afLatencyMs;
         audio_io_handle_t outputId;
-        int32_t trackId;
+        audio_port_handle_t portId;
     };
 
     /* CreateRecordInput contains all input arguments sent by AudioRecord to AudioFlinger
@@ -274,7 +274,7 @@
                     return BAD_VALUE;
                 }
             }
-            (void)parcel->readInt32(&trackId);
+            (void)parcel->read(&portId, sizeof(audio_port_handle_t));
             return NO_ERROR;
         }
 
@@ -301,7 +301,7 @@
             } else {
                 (void)parcel->writeInt32(0);
             }
-            (void)parcel->writeInt32(trackId);
+            (void)parcel->write(&portId, sizeof(audio_port_handle_t));
 
             return NO_ERROR;
         }
@@ -318,7 +318,7 @@
         audio_io_handle_t inputId;
         sp<IMemory> cblk;
         sp<IMemory> buffers;
-        int32_t trackId;
+        audio_port_handle_t portId;
     };
 
     // invariant on exit for all APIs that return an sp<>:
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index a246df6..61f3b27 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -78,8 +78,7 @@
                               audio_input_flags_t flags,
                               audio_port_handle_t *selectedDeviceId,
                               audio_port_handle_t *portId) = 0;
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool *silenced) = 0;
+    virtual status_t startInput(audio_port_handle_t portId) = 0;
     virtual status_t stopInput(audio_port_handle_t portId) = 0;
     virtual void releaseInput(audio_port_handle_t portId) = 0;
     virtual status_t initStreamVolume(audio_stream_type_t stream,
@@ -126,6 +125,10 @@
     // bit rate, duration, video and streaming or offload property is enabled
     virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
 
+    // Check if direct playback is possible for the given audio configuration and attributes.
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) = 0;
+
     /* List available audio ports and their attributes */
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
@@ -182,6 +185,8 @@
 
     virtual status_t setAssistantUid(uid_t uid) = 0;
     virtual status_t setA11yServicesUids(const std::vector<uid_t>& uids) = 0;
+
+    virtual bool     isHapticPlaybackSupported() = 0;
 };
 
 
diff --git a/media/libaudioclient/include/media/IAudioTrack.h b/media/libaudioclient/include/media/IAudioTrack.h
index 94afe3c..06e786d 100644
--- a/media/libaudioclient/include/media/IAudioTrack.h
+++ b/media/libaudioclient/include/media/IAudioTrack.h
@@ -70,6 +70,9 @@
     /* Send parameters to the audio hardware */
     virtual status_t    setParameters(const String8& keyValuePairs) = 0;
 
+    /* Selects the presentation (if available) */
+    virtual status_t    selectPresentation(int presentationId, int programId) = 0;
+
     /* Return NO_ERROR if timestamp is valid.  timestamp is undefined otherwise. */
     virtual status_t    getTimestamp(AudioTimestamp& timestamp) = 0;
 
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 0ff0d4a..584c2c0 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -15,10 +15,13 @@
     shared_libs: [
         "android.hardware.audio.effect@2.0",
         "android.hardware.audio.effect@4.0",
+        "android.hardware.audio.effect@5.0",
         "android.hardware.audio@2.0",
         "android.hardware.audio@4.0",
+        "android.hardware.audio@5.0",
         "libaudiohal@2.0",
         "libaudiohal@4.0",
+        "libaudiohal@5.0",
         "libutils",
     ],
 
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index e631ace..f86009c 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -16,6 +16,7 @@
 
 #include <android/hardware/audio/2.0/IDevicesFactory.h>
 #include <android/hardware/audio/4.0/IDevicesFactory.h>
+#include <android/hardware/audio/5.0/IDevicesFactory.h>
 
 #include <libaudiohal/FactoryHalHidl.h>
 
@@ -23,6 +24,9 @@
 
 // static
 sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
+    if (hardware::audio::V5_0::IDevicesFactory::getService() != nullptr) {
+        return V5_0::createDevicesFactoryHal();
+    }
     if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
         return V4_0::createDevicesFactoryHal();
     }
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index f7734a8..e21c235 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -16,6 +16,7 @@
 
 #include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
 #include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/5.0/IEffectsFactory.h>
 
 #include <libaudiohal/FactoryHalHidl.h>
 
@@ -23,6 +24,9 @@
 
 // static
 sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+    if (hardware::audio::effect::V5_0::IEffectsFactory::getService() != nullptr) {
+        return V5_0::createEffectsFactoryHal();
+    }
     if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
         return V4_0::createEffectsFactoryHal();
     }
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index 3827336..88533da 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -25,12 +25,6 @@
     ],
     shared_libs: [
         "android.hardware.audio.common-util",
-        "android.hardware.audio.common@2.0",
-        "android.hardware.audio.common@4.0",
-        "android.hardware.audio.effect@2.0",
-        "android.hardware.audio.effect@4.0",
-        "android.hardware.audio@2.0",
-        "android.hardware.audio@4.0",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libaudiohal_deathhandler",
@@ -63,12 +57,15 @@
     name: "libaudiohal@2.0",
     defaults: ["libaudiohal_default"],
     shared_libs: [
+        "android.hardware.audio.common@2.0",
         "android.hardware.audio.common@2.0-util",
+        "android.hardware.audio.effect@2.0",
+        "android.hardware.audio@2.0",
     ],
     cflags: [
         "-DMAJOR_VERSION=2",
         "-DMINOR_VERSION=0",
-        "-include VersionMacro.h",
+        "-include common/all-versions/VersionMacro.h",
     ]
 }
 
@@ -76,11 +73,30 @@
     name: "libaudiohal@4.0",
     defaults: ["libaudiohal_default"],
     shared_libs: [
+        "android.hardware.audio.common@4.0",
         "android.hardware.audio.common@4.0-util",
+        "android.hardware.audio.effect@4.0",
+        "android.hardware.audio@4.0",
     ],
     cflags: [
         "-DMAJOR_VERSION=4",
         "-DMINOR_VERSION=0",
-        "-include VersionMacro.h",
+        "-include common/all-versions/VersionMacro.h",
+    ]
+}
+
+cc_library_shared {
+    name: "libaudiohal@5.0",
+    defaults: ["libaudiohal_default"],
+    shared_libs: [
+        "android.hardware.audio.common@5.0",
+        "android.hardware.audio.common@5.0-util",
+        "android.hardware.audio.effect@5.0",
+        "android.hardware.audio@5.0",
+    ],
+    cflags: [
+        "-DMAJOR_VERSION=5",
+        "-DMINOR_VERSION=0",
+        "-include common/all-versions/VersionMacro.h",
     ]
 }
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 5d12fad..9747859 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -24,7 +24,7 @@
 
 using ::android::hardware::audio::CPP_VERSION::Result;
 
-#if MAJOR_VERSION == 4
+#if MAJOR_VERSION >= 4
 using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneChannelMapping;
 using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneDirectionality;
 using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneLocation;
@@ -109,7 +109,7 @@
     ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
 }
 
-#if MAJOR_VERSION == 4
+#if MAJOR_VERSION >= 4
 // TODO: Use the same implementation in the hal when it moves to a util library.
 static std::string deviceAddressToHal(const DeviceAddress& address) {
     // HAL assumes that the address is NUL-terminated.
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
index 1a9319f..fb3bb9d 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -17,8 +17,7 @@
 #ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
 #define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
 
-#include <android/hardware/audio/2.0/types.h>
-#include <android/hardware/audio/4.0/types.h>
+#include PATH(android/hardware/audio/FILE_VERSION/types.h)
 #include <hidl/HidlSupport.h>
 #include <system/audio.h>
 #include <utils/String8.h>
@@ -83,7 +82,7 @@
     void emitError(const char* funcName, const char* description);
 };
 
-#if MAJOR_VERSION == 4
+#if MAJOR_VERSION >= 4
 using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
 void microphoneInfoToHal(const MicrophoneInfo& src,
                          audio_microphone_characteristic_t *pDst);
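
These PATH(...)/FILE_VERSION includes let one source tree compile against the 2.0, 4.0 or new 5.0 HAL purely from the -DMAJOR_VERSION/-DMINOR_VERSION and -include common/all-versions/VersionMacro.h flags set per libaudiohal@X.Y module in the Android.bp above. The real macro definitions live in that header and are not part of this diff; the following is only an illustration of the token-pasting technique involved, under that assumption:

    // Illustration only -- not the actual contents of VersionMacro.h.
    // Double expansion is needed so MAJOR_VERSION/MINOR_VERSION are
    // macro-expanded before being pasted together.
    #define CONCAT_3(a, b, c) a##b##c
    #define EXPAND_CONCAT_3(a, b, c) CONCAT_3(a, b, c)

    // With -DMAJOR_VERSION=5 -DMINOR_VERSION=0 this becomes the token 5.0, so
    // #include PATH(android/hardware/audio/FILE_VERSION/types.h) expands to
    // <android/hardware/audio/5.0/types.h>.
    #define FILE_VERSION EXPAND_CONCAT_3(MAJOR_VERSION, ., MINOR_VERSION)
    #define PATH(x) <x>
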
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 723e2eb..8537608 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -19,8 +19,7 @@
 #define LOG_TAG "DeviceHalHidl"
 //#define LOG_NDEBUG 0
 
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <android/hardware/audio/4.0/IPrimaryDevice.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
 #include <cutils/native_handle.h>
 #include <hwbinder/IPCThreadState.h>
 #include <utils/Log.h>
@@ -32,31 +31,17 @@
 #include "StreamHalHidl.h"
 #include "VersionUtils.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::AudioConfig;
-using ::android::hardware::audio::common::CPP_VERSION::AudioDevice;
-using ::android::hardware::audio::common::CPP_VERSION::AudioInputFlag;
-using ::android::hardware::audio::common::CPP_VERSION::AudioOutputFlag;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPatchHandle;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPort;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPortConfig;
-using ::android::hardware::audio::common::CPP_VERSION::AudioMode;
-using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
-using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
-using ::android::hardware::audio::common::utils::mkEnumConverter;
-using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
-using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::common::utils::EnumBitfield;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 
-#if MAJOR_VERSION == 4
-using ::android::hardware::audio::CPP_VERSION::SinkMetadata;
-#endif
-
 namespace android {
 namespace CPP_VERSION {
 
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::CPP_VERSION;
+
 namespace {
 
 status_t deviceAddressFromHal(
@@ -262,8 +247,8 @@
             handle,
             hidlDevice,
             hidlConfig,
-            mkEnumConverter<AudioOutputFlag>(flags),
-#if MAJOR_VERSION == 4
+            EnumBitfield<AudioOutputFlag>(flags),
+#if MAJOR_VERSION >= 4
             {} /* metadata */,
 #endif
             [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
@@ -293,7 +278,7 @@
     Result retval = Result::NOT_INITIALIZED;
 #if MAJOR_VERSION == 2
     auto sourceMetadata = AudioSource(source);
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
     // TODO: correctly propagate the tracks sources and volume
     //       for now, only send the main source at 1dbfs
     SinkMetadata sourceMetadata = {{{AudioSource(source), 1}}};
@@ -302,7 +287,7 @@
             handle,
             hidlDevice,
             hidlConfig,
-            mkEnumConverter<AudioInputFlag>(flags),
+            EnumBitfield<AudioInputFlag>(flags),
             sourceMetadata,
             [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
                 retval = r;
@@ -375,7 +360,7 @@
     if (mDevice == 0) return NO_INIT;
     return INVALID_OPERATION;
 }
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
     if (mDevice == 0) return NO_INIT;
     Result retval;
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index fb5e7e7..291c88f 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -17,10 +17,8 @@
 #ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
 #define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
 
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <android/hardware/audio/4.0/IPrimaryDevice.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IDevice.h)
+#include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
 #include <media/audiohal/DeviceHalInterface.h>
 
 #include "ConversionHelperHidl.h"
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index 14e26f5..dffe9da 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -190,7 +190,7 @@
         std::vector<media::MicrophoneInfo> *microphones __unused) {
     return INVALID_OPERATION;
 }
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
     if (mDev->get_microphones == NULL) return INVALID_OPERATION;
     size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 28001da..5e01e42 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -20,8 +20,7 @@
 #define LOG_TAG "DevicesFactoryHalHidl"
 //#define LOG_NDEBUG 0
 
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <android/hardware/audio/4.0/IDevice.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IDevice.h)
 #include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 
@@ -76,7 +75,7 @@
     *status = BAD_VALUE;
     return {};
 }
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 static const char* idFromHal(const char *name, status_t* status) {
     *status = OK;
     return name;
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index a4282b0..27e0649 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -17,8 +17,7 @@
 #ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
 #define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
 
-#include <android/hardware/audio/2.0/IDevicesFactory.h>
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IDevicesFactory.h)
 #include <media/audiohal/DevicesFactoryHalInterface.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
index 029d71a..0c99a02 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -17,8 +17,7 @@
 #ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
 #define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
 
-#include <android/hardware/audio/effect/2.0/types.h>
-#include <android/hardware/audio/effect/4.0/types.h>
+#include PATH(android/hardware/audio/effect/FILE_VERSION/types.h)
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidl/HidlSupport.h>
 #include <media/audiohal/EffectBufferHalInterface.h>
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 12649a1..df79b95 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -31,10 +31,8 @@
 using ::android::hardware::audio::effect::CPP_VERSION::EffectConfigParameters;
 using ::android::hardware::audio::effect::CPP_VERSION::MessageQueueFlagBits;
 using ::android::hardware::audio::effect::CPP_VERSION::Result;
-using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
-using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
-using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
-using ::android::hardware::audio::common::utils::mkEnumConverter;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::common::utils::EnumBitfield;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::MQDescriptorSync;
 using ::android::hardware::Return;
@@ -42,6 +40,8 @@
 namespace android {
 namespace CPP_VERSION {
 
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+
 EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
         : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
 }
@@ -77,10 +77,10 @@
 void EffectHalHidl::effectBufferConfigFromHal(
         const buffer_config_t& halConfig, EffectBufferConfig* config) {
     config->samplingRateHz = halConfig.samplingRate;
-    config->channels = mkEnumConverter<AudioChannelMask>(halConfig.channels);
+    config->channels = EnumBitfield<AudioChannelMask>(halConfig.channels);
     config->format = AudioFormat(halConfig.format);
     config->accessMode = EffectBufferAccess(halConfig.accessMode);
-    config->mask = mkEnumConverter<EffectConfigParameters>(halConfig.mask);
+    config->mask = EnumBitfield<EffectConfigParameters>(halConfig.mask);
 }
 
 // static
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index 04f40d3..cd447ff 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -17,8 +17,7 @@
 #ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
 #define ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
 
-#include <android/hardware/audio/effect/2.0/IEffect.h>
-#include <android/hardware/audio/effect/4.0/IEffect.h>
+#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffect.h)
 #include <media/audiohal/EffectHalInterface.h>
 #include <fmq/EventFlag.h>
 #include <fmq/MessageQueue.h>
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index b880433..7fea466 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -25,8 +25,7 @@
 #include "EffectHalHidl.h"
 #include "HidlUtils.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
-using ::android::hardware::audio::common::CPP_VERSION::Uuid;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
 using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
 using ::android::hardware::audio::effect::CPP_VERSION::Result;
 using ::android::hardware::Return;
@@ -34,6 +33,8 @@
 namespace android {
 namespace CPP_VERSION {
 
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+
 EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
     mEffectsFactory = IEffectsFactory::getService();
     if (mEffectsFactory == 0) {
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index c6fced7..7027153 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -17,10 +17,8 @@
 #ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
 #define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
 
-#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/2.0/types.h>
-#include <android/hardware/audio/effect/4.0/types.h>
+#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
+#include PATH(android/hardware/audio/effect/FILE_VERSION/types.h)
 #include <media/audiohal/EffectsFactoryHalInterface.h>
 
 #include "ConversionHelperHidl.h"
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index bfa80e8..9765f1e 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -17,9 +17,9 @@
 #define LOG_TAG "StreamHalHidl"
 //#define LOG_NDEBUG 0
 
-#include <android/hardware/audio/2.0/IStreamOutCallback.h>
-#include <android/hardware/audio/4.0/IStreamOutCallback.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutCallback.h)
 #include <hwbinder/IPCThreadState.h>
+#include <media/AudioParameter.h>
 #include <mediautils/SchedulingPolicyService.h>
 #include <utils/Log.h>
 
@@ -28,34 +28,18 @@
 #include "StreamHalHidl.h"
 #include "VersionUtils.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
-using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
-using ::android::hardware::audio::common::CPP_VERSION::ThreadInfo;
-using ::android::hardware::audio::CPP_VERSION::AudioDrain;
-using ::android::hardware::audio::CPP_VERSION::IStreamOutCallback;
-using ::android::hardware::audio::CPP_VERSION::MessageQueueFlagBits;
-using ::android::hardware::audio::CPP_VERSION::MmapBufferInfo;
-using ::android::hardware::audio::CPP_VERSION::MmapPosition;
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using ::android::hardware::audio::CPP_VERSION::Result;
-using ::android::hardware::audio::CPP_VERSION::TimeSpec;
 using ::android::hardware::MQDescriptorSync;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
-
-#if MAJOR_VERSION == 4
-using ::android::hardware::audio::common::CPP_VERSION::AudioContentType;
-using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
-using ::android::hardware::audio::common::CPP_VERSION::AudioUsage;
-using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
-using ::android::hardware::audio::CPP_VERSION::PlaybackTrackMetadata;
-using ::android::hardware::audio::CPP_VERSION::RecordTrackMetadata;
-#endif
 
 namespace android {
 namespace CPP_VERSION {
 
+using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
+
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::CPP_VERSION;
+
 StreamHalHidl::StreamHalHidl(IStream *stream)
         : ConversionHelperHidl("Stream"),
           mStream(stream),
@@ -192,7 +176,7 @@
                     const native_handle *handle = hidlInfo.sharedMemory.handle();
                     if (handle->numFds > 0) {
                         info->shared_memory_fd = handle->data[0];
-#if MAJOR_VERSION == 4
+#if MAJOR_VERSION >= 4
                         info->flags = audio_mmap_buffer_flag(hidlInfo.flags);
 #endif
                         info->buffer_size_frames = hidlInfo.bufferSizeFrames;
@@ -347,6 +331,24 @@
     return processReturn("setVolume", mStream->setVolume(left, right));
 }
 
+#if MAJOR_VERSION == 2
+status_t StreamOutHalHidl::selectPresentation(int presentationId, int programId) {
+    if (mStream == 0) return NO_INIT;
+    std::vector<ParameterValue> parameters;
+    String8 halParameters;
+    parameters.push_back({AudioParameter::keyPresentationId, std::to_string(presentationId)});
+    parameters.push_back({AudioParameter::keyProgramId, std::to_string(programId)});
+    parametersToHal(hidl_vec<ParameterValue>(parameters), &halParameters);
+    return setParameters(halParameters);
+}
+#elif MAJOR_VERSION >= 4
+status_t StreamOutHalHidl::selectPresentation(int presentationId, int programId) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn("selectPresentation",
+            mStream->selectPresentation(presentationId, programId));
+}
+#endif
+
 status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
     if (mStream == 0) return NO_INIT;
     *written = 0;
@@ -581,11 +583,12 @@
 }
 
 #if MAJOR_VERSION == 2
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+status_t StreamOutHalHidl::updateSourceMetadata(
+        const StreamOutHalInterface::SourceMetadata& /* sourceMetadata */) {
     // Audio HAL V2.0 does not support propagating source metadata
     return INVALID_OPERATION;
 }
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 /** Transform a standard collection to an HIDL vector. */
 template <class Values, class ElementConverter>
 static auto transformToHidlVec(const Values& values, ElementConverter converter) {
@@ -595,8 +598,9 @@
     return result;
 }
 
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
-    hardware::audio::CPP_VERSION::SourceMetadata halMetadata = {
+status_t StreamOutHalHidl::updateSourceMetadata(
+        const StreamOutHalInterface::SourceMetadata& sourceMetadata) {
+    CPP_VERSION::SourceMetadata halMetadata = {
         .tracks = transformToHidlVec(sourceMetadata.tracks,
               [](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
                   return {
@@ -810,12 +814,13 @@
     return INVALID_OPERATION;
 }
 
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+status_t StreamInHalHidl::updateSinkMetadata(
+        const StreamInHalInterface::SinkMetadata& /* sinkMetadata */) {
     // Audio HAL V2.0 does not support propagating sink metadata
     return INVALID_OPERATION;
 }
 
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 status_t StreamInHalHidl::getActiveMicrophones(
         std::vector<media::MicrophoneInfo> *microphonesInfo) {
     if (!mStream) return NO_INIT;
@@ -834,8 +839,9 @@
     return processReturn("getActiveMicrophones", ret, retval);
 }
 
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
-    hardware::audio::CPP_VERSION::SinkMetadata halMetadata = {
+status_t StreamInHalHidl::updateSinkMetadata(const
+        StreamInHalInterface::SinkMetadata& sinkMetadata) {
+    CPP_VERSION::SinkMetadata halMetadata = {
         .tracks = transformToHidlVec(sinkMetadata.tracks,
               [](const record_track_metadata& metadata) -> RecordTrackMetadata {
                   return {
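
Note: the StreamHalHidl.cpp changes above add selectPresentation() with two paths: on HAL V2 the selection is emulated by pushing presentation/program key-value parameters through setParameters(), while on V4 and later it forwards to the stream's native selectPresentation() call. The sketch below only approximates the V2 fallback payload; the exact string produced by AudioParameter::toString() (separators, ordering) may differ.

    #include <iostream>
    #include <string>

    // Approximation of the key/value payload built from
    // AudioParameter::keyPresentationId / keyProgramId in the V2 path above.
    static std::string buildPresentationParams(int presentationId, int programId) {
        return "presentation_id=" + std::to_string(presentationId) +
               ";program_id=" + std::to_string(programId);
    }

    int main() {
        // e.g. pick presentation 21 within program 1 of a multi-presentation bitstream
        std::cout << buildPresentationParams(21, 1) << "\n";
        return 0;
    }
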
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 95ec7f1..f7b507e 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -19,12 +19,9 @@
 
 #include <atomic>
 
-#include <android/hardware/audio/2.0/IStream.h>
-#include <android/hardware/audio/4.0/IStream.h>
-#include <android/hardware/audio/2.0/IStreamIn.h>
-#include <android/hardware/audio/4.0/IStreamIn.h>
-#include <android/hardware/audio/2.0/IStreamOut.h>
-#include <android/hardware/audio/4.0/IStreamOut.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IStream.h)
+#include PATH(android/hardware/audio/FILE_VERSION/IStreamIn.h)
+#include PATH(android/hardware/audio/FILE_VERSION/IStreamOut.h)
 #include <fmq/EventFlag.h>
 #include <fmq/MessageQueue.h>
 #include <media/audiohal/StreamHalInterface.h>
@@ -131,6 +128,9 @@
     // Use this method in situations where audio mixing is done in the hardware.
     virtual status_t setVolume(float left, float right);
 
+    // Selects the audio presentation (if available).
+    virtual status_t selectPresentation(int presentationId, int programId);
+
     // Write audio buffer to driver.
     virtual status_t write(const void *buffer, size_t bytes, size_t *written);
 
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index b134f57..26d30d4 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include <hardware/audio.h>
+#include <media/AudioParameter.h>
 #include <utils/Log.h>
 
 #include "DeviceHalLocal.h"
@@ -138,6 +139,13 @@
     return mStream->set_volume(mStream, left, right);
 }
 
+status_t StreamOutHalLocal::selectPresentation(int presentationId, int programId) {
+    AudioParameter param;
+    param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
+    param.addInt(String8(AudioParameter::keyProgramId), programId);
+    return setParameters(param.toString());
+}
+
 status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
     ssize_t writeResult = mStream->write(mStream, buffer, bytes);
     if (writeResult > 0) {
@@ -346,7 +354,7 @@
         std::vector<media::MicrophoneInfo> *microphones __unused) {
     return INVALID_OPERATION;
 }
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
     if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
     size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index cea4229..4fd1960 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -103,6 +103,9 @@
     // Use this method in situations where audio mixing is done in the hardware.
     virtual status_t setVolume(float left, float right);
 
+    // Selects the audio presentation (if available).
+    virtual status_t selectPresentation(int presentationId, int programId);
+
     // Write audio buffer to driver.
     virtual status_t write(const void *buffer, size_t bytes, size_t *written);
 
diff --git a/media/libaudiohal/impl/VersionMacro.h b/media/libaudiohal/impl/VersionMacro.h
deleted file mode 100644
index 98e9c07..0000000
--- a/media/libaudiohal/impl/VersionMacro.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_VERSION_MACRO_H
-#define ANDROID_HARDWARE_VERSION_MACRO_H
-
-#if !defined(MAJOR_VERSION) || !defined(MINOR_VERSION)
-#error "MAJOR_VERSION and MINOR_VERSION must be defined"
-#endif
-
-#define CONCAT_3(a,b,c) a##b##c
-#define EXPAND_CONCAT_3(a,b,c) CONCAT_3(a,b,c)
-/** The directory name of the version: <major>.<minor> */
-#define FILE_VERSION EXPAND_CONCAT_3(MAJOR_VERSION,.,MINOR_VERSION)
-
-#define CONCAT_4(a,b,c,d) a##b##c##d
-#define EXPAND_CONCAT_4(a,b,c,d) CONCAT_4(a,b,c,d)
-/** The c++ namespace of the version: V<major>_<minor> */
-#define CPP_VERSION EXPAND_CONCAT_4(V,MAJOR_VERSION,_,MINOR_VERSION)
-
-#endif // ANDROID_HARDWARE_VERSION_MACRO_H
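
Note: with the per-module VersionMacro.h deleted above, the FILE_VERSION/CPP_VERSION tokens used by the new PATH(...) includes are expected to come from a shared header. The sketch below restates how the deleted file composed those tokens and adds a hypothetical PATH() helper (the shared definition is not shown in this diff) so the include form can be read end to end.

    /* FILE_VERSION / CPP_VERSION are copied from the deleted VersionMacro.h above;
     * PATH() is an assumed stringizing helper, not the real shared definition. */
    #define MAJOR_VERSION 4
    #define MINOR_VERSION 0

    #define CONCAT_3(a,b,c) a##b##c
    #define EXPAND_CONCAT_3(a,b,c) CONCAT_3(a,b,c)
    #define FILE_VERSION EXPAND_CONCAT_3(MAJOR_VERSION,.,MINOR_VERSION)   /* 4.0 */

    #define CONCAT_4(a,b,c,d) a##b##c##d
    #define EXPAND_CONCAT_4(a,b,c,d) CONCAT_4(a,b,c,d)
    #define CPP_VERSION EXPAND_CONCAT_4(V,MAJOR_VERSION,_,MINOR_VERSION)  /* V4_0 */

    #define STRINGIFY(x) #x
    #define PATH(x) STRINGIFY(x)  /* assumed shape: quotes the expanded path */

    /* #include PATH(android/hardware/audio/FILE_VERSION/types.h)
     * then expands to: #include "android/hardware/audio/4.0/types.h" */
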
diff --git a/media/libaudiohal/impl/VersionUtils.h b/media/libaudiohal/impl/VersionUtils.h
index 5004895..eb0a42a 100644
--- a/media/libaudiohal/impl/VersionUtils.h
+++ b/media/libaudiohal/impl/VersionUtils.h
@@ -17,8 +17,7 @@
 #ifndef ANDROID_HARDWARE_VERSION_UTILS_H
 #define ANDROID_HARDWARE_VERSION_UTILS_H
 
-#include <android/hardware/audio/2.0/types.h>
-#include <android/hardware/audio/4.0/types.h>
+#include PATH(android/hardware/audio/FILE_VERSION/types.h)
 #include <hidl/HidlSupport.h>
 
 using ::android::hardware::audio::CPP_VERSION::ParameterValue;
@@ -43,7 +42,7 @@
                              hidl_vec<ParameterValue> keys) {
     return object->setParameters(keys);
 }
-#elif MAJOR_VERSION == 4
+#elif MAJOR_VERSION >= 4
 template <class T, class Callback>
 Return<void> getParameters(T& object, hidl_vec<ParameterValue> context,
                            hidl_vec<hidl_string> keys, Callback callback) {
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
index fa0effc..1d912a0 100644
--- a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -35,6 +35,11 @@
 sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
 } // namespace V4_0
 
+namespace V5_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V5_0
+
 } // namespace android
 
 #endif // ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index c969e28..bd71dc0 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -109,6 +109,9 @@
     // Use this method in situations where audio mixing is done in the hardware.
     virtual status_t setVolume(float left, float right) = 0;
 
+    // Selects the audio presentation (if available).
+    virtual status_t selectPresentation(int presentationId, int programId) = 0;
+
     // Write audio buffer to driver.
     virtual status_t write(const void *buffer, size_t bytes, size_t *written) = 0;
 
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index af54b21..2567b3b 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -136,6 +136,9 @@
 
         // no initialization needed
         // t->frameCount
+        t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+        t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
+        channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
         t->channelCount = audio_channel_count_from_out_mask(channelMask);
         t->enabled = false;
         ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
@@ -162,6 +165,15 @@
                 AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
         t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
         t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+        // haptic
+        t->mHapticPlaybackEnabled = false;
+        t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
+        t->mMixerHapticChannelCount = 0;
+        t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
+        t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
+        t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
+        t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+        t->mKeepContractedChannels = false;
         // Check the downmixing (or upmixing) requirements.
         status_t status = t->prepareForDownmix();
         if (status != OK) {
@@ -171,6 +183,8 @@
         // prepareForDownmix() may change mDownmixRequiresFormat
         ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
         t->prepareForReformat();
+        t->prepareForAdjustChannelsNonDestructive(mFrameCount);
+        t->prepareForAdjustChannels();
 
         mTracks[name] = t;
         return OK;
@@ -185,13 +199,20 @@
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
     const std::shared_ptr<Track> &track = mTracks[name];
 
-    if (trackChannelMask == track->channelMask
-            && mixerChannelMask == track->mMixerChannelMask) {
+    if (trackChannelMask == (track->channelMask | track->mHapticChannelMask)
+            && mixerChannelMask == (track->mMixerChannelMask | track->mMixerHapticChannelMask)) {
         return false;  // no need to change
     }
+    const audio_channel_mask_t hapticChannelMask = trackChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+    trackChannelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+    const audio_channel_mask_t mixerHapticChannelMask = mixerChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+    mixerChannelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
     // always recompute for both channel masks even if only one has changed.
     const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
     const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+    const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(hapticChannelMask);
+    const uint32_t mixerHapticChannelCount =
+            audio_channel_count_from_out_mask(mixerHapticChannelMask);
 
     ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
             && trackChannelCount
@@ -200,6 +221,24 @@
     track->channelCount = trackChannelCount;
     track->mMixerChannelMask = mixerChannelMask;
     track->mMixerChannelCount = mixerChannelCount;
+    track->mHapticChannelMask = hapticChannelMask;
+    track->mHapticChannelCount = hapticChannelCount;
+    track->mMixerHapticChannelMask = mixerHapticChannelMask;
+    track->mMixerHapticChannelCount = mixerHapticChannelCount;
+
+    if (track->mHapticChannelCount > 0) {
+        track->mAdjustInChannelCount = track->channelCount + track->mHapticChannelCount;
+        track->mAdjustOutChannelCount = track->channelCount + track->mMixerHapticChannelCount;
+        track->mAdjustNonDestructiveInChannelCount = track->mAdjustOutChannelCount;
+        track->mAdjustNonDestructiveOutChannelCount = track->channelCount;
+        track->mKeepContractedChannels = track->mHapticPlaybackEnabled;
+    } else {
+        track->mAdjustInChannelCount = 0;
+        track->mAdjustOutChannelCount = 0;
+        track->mAdjustNonDestructiveInChannelCount = 0;
+        track->mAdjustNonDestructiveOutChannelCount = 0;
+        track->mKeepContractedChannels = false;
+    }
 
     // channel masks have changed, does this track need a downmixer?
     // update to try using our desired format (if we aren't already using it)
@@ -212,6 +251,9 @@
     // do it after downmix since track format may change!
     track->prepareForReformat();
 
+    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+    track->prepareForAdjustChannels();
+
     if (track->mResampler.get() != nullptr) {
         // resampler channels may have changed.
         const uint32_t resetToSampleRate = track->sampleRate;
@@ -335,10 +377,82 @@
     return NO_ERROR;
 }
 
+void AudioMixer::Track::unprepareForAdjustChannels()
+{
+    ALOGV("AUDIOMIXER::unprepareForAdjustChannels");
+    if (mAdjustChannelsBufferProvider.get() != nullptr) {
+        mAdjustChannelsBufferProvider.reset(nullptr);
+        reconfigureBufferProviders();
+    }
+}
+
+status_t AudioMixer::Track::prepareForAdjustChannels()
+{
+    ALOGV("AudioMixer::prepareForAdjustChannels(%p) with inChannelCount: %u, outChannelCount: %u",
+            this, mAdjustInChannelCount, mAdjustOutChannelCount);
+    unprepareForAdjustChannels();
+    if (mAdjustInChannelCount != mAdjustOutChannelCount) {
+        mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
+                mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, kCopyBufferFrameCount));
+        reconfigureBufferProviders();
+    }
+    return NO_ERROR;
+}
+
+void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
+{
+    ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
+    if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+        reconfigureBufferProviders();
+    }
+}
+
+status_t AudioMixer::Track::prepareForAdjustChannelsNonDestructive(size_t frames)
+{
+    ALOGV("AudioMixer::prepareForAdjustChannelsNonDestructive(%p) with inChannelCount: %u, "
+          "outChannelCount: %u, keepContractedChannels: %d",
+            this, mAdjustNonDestructiveInChannelCount, mAdjustNonDestructiveOutChannelCount,
+            mKeepContractedChannels);
+    unprepareForAdjustChannelsNonDestructive();
+    if (mAdjustNonDestructiveInChannelCount != mAdjustNonDestructiveOutChannelCount) {
+        uint8_t* buffer = mKeepContractedChannels
+                ? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
+                        mMixerChannelCount, mMixerFormat)
+                : NULL;
+        mAdjustChannelsNonDestructiveBufferProvider.reset(
+                new AdjustChannelsNonDestructiveBufferProvider(
+                        mFormat,
+                        mAdjustNonDestructiveInChannelCount,
+                        mAdjustNonDestructiveOutChannelCount,
+                        mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
+                        frames,
+                        buffer));
+        reconfigureBufferProviders();
+    }
+    return NO_ERROR;
+}
+
+void AudioMixer::Track::clearContractedBuffer()
+{
+    if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        static_cast<AdjustChannelsNonDestructiveBufferProvider*>(
+                mAdjustChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+    }
+}
+
 void AudioMixer::Track::reconfigureBufferProviders()
 {
     // configure from upstream to downstream buffer providers.
     bufferProvider = mInputBufferProvider;
+    if (mAdjustChannelsBufferProvider.get() != nullptr) {
+        mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mAdjustChannelsBufferProvider.get();
+    }
+    if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        mAdjustChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mAdjustChannelsNonDestructiveBufferProvider.get();
+    }
     if (mReformatBufferProvider.get() != nullptr) {
         mReformatBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mReformatBufferProvider.get();
@@ -533,7 +647,8 @@
         case CHANNEL_MASK: {
             const audio_channel_mask_t trackChannelMask =
                 static_cast<audio_channel_mask_t>(valueInt);
-            if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
+            if (setChannelMasks(name, trackChannelMask,
+                    (track->mMixerChannelMask | track->mMixerHapticChannelMask))) {
                 ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
                 invalidate();
             }
@@ -542,6 +657,9 @@
             if (track->mainBuffer != valueBuf) {
                 track->mainBuffer = valueBuf;
                 ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+                if (track->mKeepContractedChannels) {
+                    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                }
                 invalidate();
             }
             break;
@@ -571,16 +689,29 @@
             if (track->mMixerFormat != format) {
                 track->mMixerFormat = format;
                 ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+                if (track->mKeepContractedChannels) {
+                    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                }
             }
             } break;
         case MIXER_CHANNEL_MASK: {
             const audio_channel_mask_t mixerChannelMask =
                     static_cast<audio_channel_mask_t>(valueInt);
-            if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
+            if (setChannelMasks(name, track->channelMask | track->mHapticChannelMask,
+                    mixerChannelMask)) {
                 ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
                 invalidate();
             }
             } break;
+        case HAPTIC_ENABLED: {
+            const bool hapticPlaybackEnabled = static_cast<bool>(valueInt);
+            if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
+                track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
+                track->mKeepContractedChannels = hapticPlaybackEnabled;
+                track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                track->prepareForAdjustChannels();
+            }
+            } break;
         default:
             LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
         }
@@ -823,6 +954,10 @@
         track->mDownmixerBufferProvider->reset();
     } else if (track->mReformatBufferProvider.get() != nullptr) {
         track->mReformatBufferProvider->reset();
+    } else if (track->mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        track->mAdjustChannelsNonDestructiveBufferProvider->reset();
+    } else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
+        track->mAdjustChannelsBufferProvider->reset();
     }
 
     track->mInputBufferProvider = bufferProvider;
@@ -1266,8 +1401,8 @@
 
         const std::shared_ptr<Track> &t = mTracks[group[0]];
         memset(t->mainBuffer, 0,
-                mFrameCount * t->mMixerChannelCount
-                * audio_bytes_per_sample(t->mMixerFormat));
+                mFrameCount * audio_bytes_per_frame(
+                        t->mMixerChannelCount + t->mMixerHapticChannelCount, t->mMixerFormat));
 
         // now consume data
         for (const int name : group) {
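
Note: the AudioMixer changes above carve haptic channels out of the track and mixer channel masks before any channel counting, so the downmix/reformat stages keep seeing a pure audio mask while the adjust-channels stages account for the extra haptic channels. A standalone sketch of that split, using illustrative mask constants rather than the real AUDIO_CHANNEL_* values:

    #include <bitset>
    #include <cstdint>
    #include <iostream>

    namespace sketch {
    // Illustrative stand-ins, not the real AUDIO_CHANNEL_* values.
    constexpr uint32_t kChannelOutStereo = 0x3;
    constexpr uint32_t kChannelHapticAll = 0x600;

    inline uint32_t channelCountFromMask(uint32_t mask) {
        return static_cast<uint32_t>(std::bitset<32>(mask).count());
    }
    }  // namespace sketch

    int main() {
        using namespace sketch;
        uint32_t trackChannelMask = kChannelOutStereo | kChannelHapticAll;

        // Same order of operations as setChannelMasks(): extract, then strip.
        const uint32_t hapticChannelMask = trackChannelMask & kChannelHapticAll;
        trackChannelMask &= ~kChannelHapticAll;

        std::cout << "audio channels: " << channelCountFromMask(trackChannelMask)
                  << ", haptic channels: " << channelCountFromMask(hapticChannelMask) << "\n";
        return 0;
    }
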
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 2d9e1cb..a1a1a0d 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -19,6 +19,7 @@
 
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
+#include <audio_utils/channels.h>
 #include <external/sonic/sonic.h>
 #include <media/audiohal/EffectBufferHalInterface.h>
 #include <media/audiohal/EffectHalInterface.h>
@@ -63,7 +64,8 @@
 
 CopyBufferProvider::~CopyBufferProvider()
 {
-    ALOGV("~CopyBufferProvider(%p)", this);
+    ALOGV("%s(%p) %zu %p %p",
+           __func__, this, mBuffer.frameCount, mTrackBufferProvider, mLocalBufferData);
     if (mBuffer.frameCount != 0) {
         mTrackBufferProvider->releaseBuffer(&mBuffer);
     }
@@ -133,6 +135,16 @@
     mConsumed = 0;
 }
 
+void CopyBufferProvider::setBufferProvider(AudioBufferProvider *p) {
+    ALOGV("%s(%p): mTrackBufferProvider:%p  mBuffer.frameCount:%zu",
+            __func__, p, mTrackBufferProvider, mBuffer.frameCount);
+    if (mTrackBufferProvider == p) {
+        return;
+    }
+    mBuffer.frameCount = 0;
+    PassthruBufferProvider::setBufferProvider(p);
+}
+
 DownmixerBufferProvider::DownmixerBufferProvider(
         audio_channel_mask_t inputChannelMask,
         audio_channel_mask_t outputChannelMask, audio_format_t format,
@@ -528,6 +540,16 @@
     mRemaining = 0;
 }
 
+void TimestretchBufferProvider::setBufferProvider(AudioBufferProvider *p) {
+    ALOGV("%s(%p): mTrackBufferProvider:%p  mBuffer.frameCount:%zu",
+            __func__, p, mTrackBufferProvider, mBuffer.frameCount);
+    if (mTrackBufferProvider == p) {
+        return;
+    }
+    mBuffer.frameCount = 0;
+    PassthruBufferProvider::setBufferProvider(p);
+}
+
 status_t TimestretchBufferProvider::setPlaybackRate(const AudioPlaybackRate &playbackRate)
 {
     mPlaybackRate = playbackRate;
@@ -609,5 +631,83 @@
         }
     }
 }
+
+AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(audio_format_t format,
+        size_t inChannelCount, size_t outChannelCount, size_t frameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_frame(inChannelCount, format),
+                audio_bytes_per_frame(outChannelCount, format),
+                frameCount),
+        mFormat(format),
+        mInChannelCount(inChannelCount),
+        mOutChannelCount(outChannelCount),
+        mSampleSizeInBytes(audio_bytes_per_sample(format))
+{
+    ALOGV("AdjustBufferProvider(%p)(%#x, %zu, %zu, %zu)",
+            this, format, inChannelCount, outChannelCount, frameCount);
+}
+
+void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    adjust_channels(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
+            frames * mInChannelCount * mSampleSizeInBytes);
+}
+
+AdjustChannelsNonDestructiveBufferProvider::AdjustChannelsNonDestructiveBufferProvider(
+        audio_format_t format, size_t inChannelCount, size_t outChannelCount,
+        audio_format_t contractedFormat, size_t contractedFrameCount, void* contractedBuffer) :
+        CopyBufferProvider(
+                audio_bytes_per_frame(inChannelCount, format),
+                audio_bytes_per_frame(outChannelCount, format),
+                0 /*bufferFrameCount*/),
+        mFormat(format),
+        mInChannelCount(inChannelCount),
+        mOutChannelCount(outChannelCount),
+        mSampleSizeInBytes(audio_bytes_per_sample(format)),
+        mContractedChannelCount(inChannelCount - outChannelCount),
+        mContractedFormat(contractedFormat),
+        mContractedFrameCount(contractedFrameCount),
+        mContractedBuffer(contractedBuffer),
+        mContractedWrittenFrames(0)
+{
+    ALOGV("AdjustChannelsNonDestructiveBufferProvider(%p)(%#x, %zu, %zu, %#x, %p)",
+            this, format, inChannelCount, outChannelCount, contractedFormat, contractedBuffer);
+    if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
+        mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
+    }
+}
+
+status_t AdjustChannelsNonDestructiveBufferProvider::getNextBuffer(
+        AudioBufferProvider::Buffer* pBuffer)
+{
+    const size_t outFramesLeft = mContractedFrameCount - mContractedWrittenFrames;
+    if (outFramesLeft < pBuffer->frameCount) {
+        // Restrict the frame count so that we don't write over the size of the output buffer.
+        pBuffer->frameCount = outFramesLeft;
+    }
+    return CopyBufferProvider::getNextBuffer(pBuffer);
+}
+
+void AdjustChannelsNonDestructiveBufferProvider::copyFrames(
+        void *dst, const void *src, size_t frames)
+{
+    adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
+            frames * mInChannelCount * mSampleSizeInBytes);
+    if (mContractedFormat != AUDIO_FORMAT_INVALID && mContractedBuffer != NULL
+            && mInChannelCount > mOutChannelCount) {
+        const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+        memcpy_by_audio_format(
+                (uint8_t*)mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
+                mContractedFormat, (uint8_t*)dst + contractedIdx, mFormat,
+                mContractedChannelCount * frames);
+        mContractedWrittenFrames += frames;
+    }
+}
+
+void AdjustChannelsNonDestructiveBufferProvider::reset()
+{
+    mContractedWrittenFrames = 0;
+    CopyBufferProvider::reset();
+}
 // ----------------------------------------------------------------------------
 } // namespace android
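
Note: AdjustChannelsBufferProvider above delegates the per-buffer work to audio_utils' adjust_channels(), which expands or contracts interleaved frames; the non-destructive variant additionally copies the contracted tail channels into a side buffer so haptic data is not lost. A self-contained sketch of the basic expand/contract copy, assuming the zero-fill/drop behaviour described for that utility:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Expand or contract interleaved frames; extra output channels are zero-filled,
    // surplus input channels are dropped (the behaviour assumed for adjust_channels()).
    template <typename T>
    void adjustChannelsSketch(const T* src, size_t inCh, T* dst, size_t outCh, size_t frames) {
        const size_t copyCh = inCh < outCh ? inCh : outCh;
        for (size_t f = 0; f < frames; ++f) {
            for (size_t c = 0; c < copyCh; ++c) dst[f * outCh + c] = src[f * inCh + c];
            for (size_t c = copyCh; c < outCh; ++c) dst[f * outCh + c] = T(0);
        }
    }

    int main() {
        // Expand 2-channel frames to 4-channel frames, e.g. to leave room for haptic data.
        std::vector<float> in = {0.1f, 0.2f, 0.3f, 0.4f};  // 2 stereo frames
        std::vector<float> out(2 * 4);
        adjustChannelsSketch(in.data(), 2, out.data(), 4, 2);
        for (float v : out) std::cout << v << ' ';
        std::cout << '\n';  // 0.1 0.2 0 0 0.3 0.4 0 0
        return 0;
    }
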
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 5c57c43..7a32d3f 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -129,11 +129,14 @@
         "Common/lib",
         "Bundle/lib",
     ],
-
+    shared_libs: [
+        "liblog",
+    ],
     cflags: [
         "-fvisibility=hidden",
         "-DBUILD_FLOAT",
         "-DHIGHER_FS",
+        "-DSUPPORT_MC",
 
         "-Wall",
         "-Werror",
diff --git a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
index 4c2b954..a1fa79a 100644
--- a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
+++ b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
@@ -256,6 +256,9 @@
     LVDBE_Volume_en         VolumeControl;
     LVM_INT16               VolumedB;
     LVM_INT16               HeadroomdB;
+#ifdef SUPPORT_MC
+    LVM_INT16               NrChannels;
+#endif
 
 } LVDBE_Params_t;
 
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c
index fd4016b..0ba2c86 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c
@@ -118,7 +118,7 @@
      * Calculate the table offsets
      */
     LVM_UINT16 Offset = (LVM_UINT16)((LVM_UINT16)pParams->SampleRate + \
-                                    (LVM_UINT16)(pParams->CentreFrequency * (1+LVDBE_FS_48000)));    
+                                    (LVM_UINT16)(pParams->CentreFrequency * (1+LVDBE_FS_48000)));
 #endif
 
     /*
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c
index 3fff2a2..2946734 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c
@@ -95,7 +95,7 @@
 #ifdef BUILD_FLOAT
         pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size   = sizeof(LVDBE_Coef_FLOAT_t);
 #else
-        pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size         = sizeof(LVDBE_Coef_t);   
+        pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size         = sizeof(LVDBE_Coef_t);
 #endif
         pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Alignment    = LVDBE_PERSISTENT_COEF_ALIGN;
         pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Type         = LVDBE_PERSISTENT_COEF;
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
index 4e5207f..4225a30 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
@@ -64,7 +64,12 @@
 #define LVDBE_PERSISTENT_COEF_ALIGN      4       /* 32-bit alignment for coef */
 #define LVDBE_SCRATCH_ALIGN              4       /* 32-bit alignment for long data */
 
+#ifdef SUPPORT_MC
+/* Number of buffers required for inplace processing */
+#define LVDBE_SCRATCHBUFFERS_INPLACE     (LVM_MAX_CHANNELS * 3)
+#else
 #define LVDBE_SCRATCHBUFFERS_INPLACE     6       /* Number of buffers required for inplace processing */
+#endif
 
 #define LVDBE_MIXER_TC                   5       /* Mixer time  */
 #define LVDBE_BYPASS_MIXER_TC            100     /* Bypass mixer time */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c
index 10ea700..c4d3403 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c
@@ -21,11 +21,13 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#include <string.h> // memset
 #include "LVDBE.h"
 #include "LVDBE_Private.h"
 #include "VectorArithmetic.h"
 #include "AGC.h"
 #include "LVDBE_Coeffs.h"               /* Filter coefficients */
+#include <log/log.h>
 
 /********************************************************************************************/
 /*                                                                                          */
@@ -187,42 +189,49 @@
 LVDBE_ReturnStatus_en LVDBE_Process(LVDBE_Handle_t hInstance,
     const LVM_FLOAT *pInData,
     LVM_FLOAT *pOutData,
-    LVM_UINT16 NumSamples)
+    const LVM_UINT16 NrFrames) // updated to use samples = frames * channels.
 {
-
   LVDBE_Instance_t *pInstance =(LVDBE_Instance_t *)hInstance;
-  LVM_FLOAT *pScratch_in = (LVM_FLOAT *)pInstance->MemoryTable.Region
-  [LVDBE_MEMREGION_SCRATCH].pBaseAddress;
-  LVM_FLOAT *pScratch = pScratch_in + 2 * NumSamples;
-  LVM_FLOAT *pMono;
-  LVM_INT32 ii = 0;
 
-  /* Scratch for Volume Control starts at offset of 4*NumSamples float values from pScratch */
-  LVM_FLOAT           *pScratchVol = (LVM_FLOAT *)(&pScratch_in[4 * NumSamples]);
-//  LVM_INT16 *pScratchVol_int = (LVM_INT16 *)(pScratchVol);
+  /*Extract number of Channels info*/
+#ifdef SUPPORT_MC
+  // Mono passed in as stereo
+  const LVM_INT32 NrChannels = pInstance->Params.NrChannels == 1
+      ? 2 : pInstance->Params.NrChannels;
+#else
+  const LVM_INT32 NrChannels = 2; // FCC_2
+#endif
+  const LVM_INT32 NrSamples = NrChannels * NrFrames;
 
-  /* Scratch for Mono path starts at offset of 6*NumSamples 32-bit values from pScratch */
-  pMono = &pScratch_in[4 * NumSamples];
+  /* Space to store DBE path computation */
+  LVM_FLOAT * const pScratch =
+          (LVM_FLOAT *)pInstance->MemoryTable.Region[LVDBE_MEMREGION_SCRATCH].pBaseAddress;
 
   /*
-   * Check the number of samples is not too large
+   * Scratch for Mono path starts at offset of
+   * NrSamples float values from pScratch.
    */
-  if (NumSamples > pInstance->Capabilities.MaxBlockSize)
+  LVM_FLOAT * const pMono = pScratch + NrSamples;
+
+  /*
+   * TRICKY: pMono is used and discarded by the DBE path.
+   *         so it is available for use for the pScratchVol
+   *         path which is computed afterwards.
+   *
+   * Space to store Volume Control path computation.
+   * This is identical to pMono (see TRICKY comment).
+   */
+  LVM_FLOAT * const pScratchVol = pMono;
+
+  /*
+   * Check the number of frames is not too large
+   */
+  if (NrFrames > pInstance->Capabilities.MaxBlockSize)
   {
-    return(LVDBE_TOOMANYSAMPLES);
+    return LVDBE_TOOMANYSAMPLES;
   }
 
   /*
-   * Convert 16-bit samples to Float
-   */
-  Copy_Float(pInData, /* Source 16-bit data    */
-      pScratch_in, /* Dest. 32-bit data     */
-      (LVM_INT16)(2 * NumSamples)); /* Left and right        */
-
-  for (ii = 0; ii < 2 * NumSamples; ii++) {
-    pScratch[ii] = pScratch_in[ii];
-  }
-  /*
    * Check if the algorithm is enabled
    */
   /* DBE path is processed when DBE is ON or during On/Off transitions */
@@ -230,50 +239,81 @@
       (LVC_Mixer_GetCurrent(&pInstance->pData->BypassMixer.MixerStream[0])
           !=LVC_Mixer_GetTarget(&pInstance->pData->BypassMixer.MixerStream[0])))
   {
+    // make copy of input data
+    Copy_Float(pInData,
+        pScratch,
+        (LVM_INT16)NrSamples);
 
     /*
      * Apply the high pass filter if selected
      */
     if (pInstance->Params.HPFSelect == LVDBE_HPF_ON)
     {
+#ifdef SUPPORT_MC
+      BQ_MC_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance, /* Filter instance      */
+          pScratch, /* Source               */
+          pScratch, /* Destination          */
+          (LVM_INT16)NrFrames,
+          (LVM_INT16)NrChannels);
+#else
       BQ_2I_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance,/* Filter instance      */
-          (LVM_FLOAT *)pScratch, /* Source               */
-          (LVM_FLOAT *)pScratch, /* Destination          */
-          (LVM_INT16)NumSamples); /* Number of samples    */
+          pScratch, /* Source               */
+          pScratch, /* Destination          */
+          (LVM_INT16)NrFrames);
+#endif
     }
 
     /*
      * Create the mono stream
      */
-    From2iToMono_Float((LVM_FLOAT *)pScratch, /* Stereo source         */
+#ifdef SUPPORT_MC
+    FromMcToMono_Float(pScratch, /* Source */
+        pMono, /* Mono destination */
+        (LVM_INT16)NrFrames,  /* Number of frames */
+        (LVM_INT16)NrChannels);
+#else
+    From2iToMono_Float(pScratch, /* Stereo source         */
         pMono, /* Mono destination      */
-        (LVM_INT16)NumSamples); /* Number of samples     */
+        (LVM_INT16)NrFrames);
+#endif
 
     /*
      * Apply the band pass filter
      */
     BP_1I_D32F32C30_TRC_WRA_02(&pInstance->pCoef->BPFInstance, /* Filter instance       */
-        (LVM_FLOAT *)pMono, /* Source                */
-        (LVM_FLOAT *)pMono, /* Destination           */
-        (LVM_INT16)NumSamples); /* Number of samples     */
+        pMono, /* Source                */
+        pMono, /* Destination           */
+        (LVM_INT16)NrFrames);
 
     /*
      * Apply the AGC and mix
      */
+#ifdef SUPPORT_MC
+    AGC_MIX_VOL_Mc1Mon_D32_WRA(&pInstance->pData->AGCInstance, /* Instance pointer      */
+        pScratch, /* Source         */
+        pMono, /* Mono band pass source */
+        pScratch, /* Destination    */
+        NrFrames, /* Number of frames     */
+        NrChannels); /* Number of channels     */
+#else
     AGC_MIX_VOL_2St1Mon_D32_WRA(&pInstance->pData->AGCInstance, /* Instance pointer      */
         pScratch, /* Stereo source         */
         pMono, /* Mono band pass source */
         pScratch, /* Stereo destination    */
-        NumSamples); /* Number of samples     */
+        NrFrames);
+#endif
 
-    for (ii = 0; ii < 2 * NumSamples; ii++) {
+    for (LVM_INT32 ii = 0; ii < NrSamples; ++ii) {
       //TODO: replace with existing clamping function
-      if(pScratch[ii] < -1.0) {
+      if (pScratch[ii] < -1.0) {
         pScratch[ii] = -1.0;
-      } else if(pScratch[ii] > 1.0) {
+      } else if (pScratch[ii] > 1.0) {
         pScratch[ii] = 1.0;
       }
     }
+  } else {
+    // clear DBE processed path
+    memset(pScratch, 0, sizeof(*pScratch) * NrSamples);
   }
 
   /* Bypass Volume path is processed when DBE is OFF or during On/Off transitions */
@@ -286,21 +326,40 @@
      * The algorithm is disabled but volume management is required to compensate for
      * headroom and volume (if enabled)
      */
-    LVC_MixSoft_1St_D16C31_SAT(&pInstance->pData->BypassVolume,
-        pScratch_in,
+#ifdef SUPPORT_MC
+    LVC_MixSoft_Mc_D16C31_SAT(&pInstance->pData->BypassVolume,
+        pInData,
         pScratchVol,
-        (LVM_INT16)(2 * NumSamples)); /* Left and right */
+        (LVM_INT16)NrFrames,
+        (LVM_INT16)NrChannels);
+#else
+    LVC_MixSoft_1St_D16C31_SAT(&pInstance->pData->BypassVolume,
+        pInData,
+        pScratchVol,
+        (LVM_INT16)NrSamples); /* Left and right, really # samples */
+#endif
+  } else {
+    // clear bypass volume path
+    memset(pScratchVol, 0, sizeof(*pScratchVol) * NrSamples);
   }
 
   /*
    * Mix DBE processed path and bypass volume path
    */
+#ifdef SUPPORT_MC
+  LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->pData->BypassMixer,
+      pScratch,
+      pScratchVol,
+      pOutData,
+      (LVM_INT16)NrFrames,
+      (LVM_INT16)NrChannels);
+#else
   LVC_MixSoft_2St_D16C31_SAT(&pInstance->pData->BypassMixer,
       pScratch,
       pScratchVol,
       pOutData,
-      (LVM_INT16)(2 * NumSamples));
-
-  return(LVDBE_SUCCESS);
+      (LVM_INT16)NrSamples);
+#endif
+  return LVDBE_SUCCESS;
 }
 #endif
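
Note: the reworked LVDBE_Process() above lays out its scratch memory as one NrSamples block for the processed copy followed by a second block that is reused twice: first for the mono band-pass signal (NrFrames floats) and then, per the TRICKY comment, for the multichannel bypass-volume output (NrSamples floats). A small sketch of that partitioning with illustrative sizes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        const size_t nrChannels = 6;   // e.g. 5.1 content under SUPPORT_MC
        const size_t nrFrames = 128;
        const size_t nrSamples = nrChannels * nrFrames;

        // First block: DBE-processed copy; second block: mono path, later reused
        // for the bypass-volume path, so it is sized for the larger need (NrSamples).
        std::vector<float> scratch(2 * nrSamples);
        float* const pScratch = scratch.data();
        float* const pMono = pScratch + nrSamples;
        float* const pScratchVol = pMono;  // same storage, used after the DBE path is done

        std::cout << "scratch floats: " << scratch.size()
                  << ", pScratchVol aliases pMono: " << std::boolalpha
                  << (pScratchVol == pMono) << '\n';
        return 0;
    }
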
diff --git a/media/libeffects/lvm/lib/Bundle/lib/LVM.h b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
index 9b6da31..83ecae1 100644
--- a/media/libeffects/lvm/lib/Bundle/lib/LVM.h
+++ b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
@@ -296,6 +296,9 @@
     /* Spectrum Analyzer parameters Control */
     LVM_PSA_Mode_en             PSA_Enable;
     LVM_PSA_DecaySpeed_en       PSA_PeakDecayRate;      /* Peak value decay rate*/
+#ifdef SUPPORT_MC
+    LVM_INT32                   NrChannels;
+#endif
 
 } LVM_ControlParams_t;
 
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c
index 0a3c30e..37272e3 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c
@@ -25,6 +25,8 @@
 #include "LVM_Private.h"
 #include "VectorArithmetic.h"
 
+#include <log/log.h>
+
 /****************************************************************************************/
 /*                                                                                      */
 /* FUNCTION:                 LVM_BufferManagedIn                                        */
@@ -62,8 +64,11 @@
     LVM_Instance_t   *pInstance = (LVM_Instance_t  *)hInstance;
     LVM_Buffer_t     *pBuffer;
     LVM_FLOAT        *pDest;
+#ifdef SUPPORT_MC
+    LVM_INT16        NumChannels = pInstance->NrChannels;
+#else
     LVM_INT16        NumChannels = 2;
-
+#endif
 
     /*
      * Set the processing address pointers
@@ -207,8 +212,7 @@
     LVM_Instance_t   *pInstance = (LVM_Instance_t  *)hInstance;
     LVM_Buffer_t     *pBuffer;
     LVM_INT16        *pDest;
-    LVM_INT16        NumChannels =2;
-
+    LVM_INT16        NumChannels = 2;
 
     /*
      * Set the processing address pointers
@@ -499,7 +503,7 @@
         if (pBuffer->OutDelaySamples != 0)
         {
             Copy_16(&pBuffer->OutDelayBuffer[0],                    /* Source */
-                    pDest,                                          /* Detsination */
+                    pDest,                                          /* Destination */
                     (LVM_INT16)(2*pBuffer->OutDelaySamples));       /* Number of delay samples */
             pDest += 2 * pBuffer->OutDelaySamples;                  /* Update the output pointer */
             pBuffer->SamplesToOutput = (LVM_INT16)(pBuffer->SamplesToOutput - pBuffer->OutDelaySamples); /* Update the number of samples to output */
@@ -750,7 +754,11 @@
     LVM_INT16       NumSamples;
     LVM_FLOAT       *pStart;
     LVM_FLOAT       *pDest;
-
+#ifdef SUPPORT_MC
+    LVM_INT32       NrChannels = pInstance->NrChannels;
+#define NrFrames NumSamples  // alias for clarity
+#define FrameCount SampleCount
+#endif
 
     /*
      * Set the pointers
@@ -758,7 +766,6 @@
     NumSamples = pBuffer->SamplesToOutput;
     pStart     = pBuffer->pScratch;
 
-
     /*
      * check if it is the first call of a block
       */
@@ -786,14 +793,25 @@
             /*
              * Copy all output delay samples to the output
              */
+#ifdef SUPPORT_MC
             Copy_Float(&pBuffer->OutDelayBuffer[0],                /* Source */
-                       pDest,                                      /* Detsination */
+                       pDest,                                      /* Destination */
+                       /* Number of delay samples */
+                       (LVM_INT16)(NrChannels * pBuffer->OutDelaySamples));
+#else
+            Copy_Float(&pBuffer->OutDelayBuffer[0],                /* Source */
+                       pDest,                                      /* Destination */
                        (LVM_INT16)(2 * pBuffer->OutDelaySamples)); /* Number of delay samples */
+#endif
 
             /*
              * Update the pointer and sample counts
              */
+#ifdef SUPPORT_MC
+            pDest += NrChannels * pBuffer->OutDelaySamples; /* Output sample pointer */
+#else
             pDest += 2 * pBuffer->OutDelaySamples; /* Output sample pointer */
+#endif
             NumSamples = (LVM_INT16)(NumSamples - pBuffer->OutDelaySamples); /* Samples left \
                                                                                 to send */
             pBuffer->OutDelaySamples = 0; /* No samples left in the buffer */
@@ -803,23 +821,40 @@
             /*
              * Copy only some of the output delay samples to the output
              */
+#ifdef SUPPORT_MC
             Copy_Float(&pBuffer->OutDelayBuffer[0],                    /* Source */
-                       pDest,                                          /* Detsination */
+                       pDest,                                          /* Destination */
+                       (LVM_INT16)(NrChannels * NrFrames));       /* Number of delay samples */
+#else
+            Copy_Float(&pBuffer->OutDelayBuffer[0],                    /* Source */
+                       pDest,                                          /* Destination */
                        (LVM_INT16)(2 * NumSamples));       /* Number of delay samples */
+#endif
 
             /*
              * Update the pointer and sample counts
              */
+#ifdef SUPPORT_MC
+            pDest += NrChannels * NrFrames; /* Output sample pointer */
+#else
             pDest += 2 * NumSamples; /* Output sample pointer */
+#endif
             /* No samples left in the buffer */
             pBuffer->OutDelaySamples = (LVM_INT16)(pBuffer->OutDelaySamples - NumSamples);
 
             /*
              * Realign the delay buffer data to avoid using circular buffer management
              */
+#ifdef SUPPORT_MC
+            Copy_Float(&pBuffer->OutDelayBuffer[NrChannels * NrFrames],         /* Source */
+                       &pBuffer->OutDelayBuffer[0],                    /* Destination */
+                       /* Number of samples to move */
+                       (LVM_INT16)(NrChannels * pBuffer->OutDelaySamples));
+#else
             Copy_Float(&pBuffer->OutDelayBuffer[2 * NumSamples],         /* Source */
                        &pBuffer->OutDelayBuffer[0],                    /* Destination */
                        (LVM_INT16)(2 * pBuffer->OutDelaySamples)); /* Number of samples to move */
+#endif
             NumSamples = 0;                                /* Samples left to send */
         }
     }
@@ -836,13 +871,23 @@
             /*
              * Copy all processed samples to the output
              */
+#ifdef SUPPORT_MC
             Copy_Float(pStart,                                      /* Source */
-                       pDest,                                       /* Detsination */
+                       pDest,                                       /* Destination */
+                       (LVM_INT16)(NrChannels * FrameCount)); /* Number of processed samples */
+#else
+            Copy_Float(pStart,                                      /* Source */
+                       pDest,                                       /* Destination */
                        (LVM_INT16)(2 * SampleCount)); /* Number of processed samples */
+#endif
             /*
              * Update the pointer and sample counts
              */
+#ifdef SUPPORT_MC
+            pDest      += NrChannels * FrameCount;                 /* Output sample pointer */
+#else
             pDest      += 2 * SampleCount;                          /* Output sample pointer */
+#endif
             NumSamples  = (LVM_INT16)(NumSamples - SampleCount);    /* Samples left to send */
             SampleCount = 0; /* No samples left in the buffer */
         }
@@ -851,14 +896,25 @@
             /*
              * Copy only some processed samples to the output
              */
+#ifdef SUPPORT_MC
+            Copy_Float(pStart,                                         /* Source */
+                       pDest,                                          /* Destination */
+                       (LVM_INT16)(NrChannels * NrFrames));  /* Number of processed samples */
+#else
             Copy_Float(pStart,                                         /* Source */
                        pDest,                                          /* Destination */
                        (LVM_INT16)(2 * NumSamples));     /* Number of processed samples */
+#endif
             /*
              * Update the pointers and sample counts
                */
+#ifdef SUPPORT_MC
+            pStart      += NrChannels * NrFrames;               /* Processed sample pointer */
+            pDest       += NrChannels * NrFrames;               /* Output sample pointer */
+#else
             pStart      += 2 * NumSamples;                        /* Processed sample pointer */
             pDest       += 2 * NumSamples;                        /* Output sample pointer */
+#endif
             SampleCount  = (LVM_INT16)(SampleCount - NumSamples); /* Processed samples left */
             NumSamples   = 0;                                     /* Clear the sample count */
         }
@@ -870,9 +926,16 @@
      */
     if (SampleCount != 0)
     {
+#ifdef SUPPORT_MC
+        Copy_Float(pStart,                                                 /* Source */
+                   /* Destination */
+                   &pBuffer->OutDelayBuffer[NrChannels * pBuffer->OutDelaySamples],
+                   (LVM_INT16)(NrChannels * FrameCount));      /* Number of processed samples */
+#else
         Copy_Float(pStart,                                                 /* Source */
                    &pBuffer->OutDelayBuffer[2 * pBuffer->OutDelaySamples], /* Destination */
                    (LVM_INT16)(2 * SampleCount));               /* Number of processed samples */
+#endif
         /* Update the buffer count */
         pBuffer->OutDelaySamples = (LVM_INT16)(pBuffer->OutDelaySamples + SampleCount);
     }
@@ -1063,14 +1126,24 @@
 {
 
     LVM_Instance_t      *pInstance  = (LVM_Instance_t  *)hInstance;
-    LVM_INT16           NumChannels =2;
+#ifdef SUPPORT_MC
+    LVM_INT16           NumChannels = pInstance->NrChannels;
+#undef NrFrames
+#define NrFrames (*pNumSamples) // alias for clarity
+#else
+    LVM_INT16           NumChannels = 2;
+#endif
 
 
     /*
      * Update sample counts
      */
     pInstance->pInputSamples    += (LVM_INT16)(*pNumSamples * NumChannels); /* Update the I/O pointers */
+#ifdef SUPPORT_MC
+    pInstance->pOutputSamples   += (LVM_INT16)(NrFrames * NumChannels);
+#else
     pInstance->pOutputSamples   += (LVM_INT16)(*pNumSamples * 2);
+#endif
     pInstance->SamplesToProcess  = (LVM_INT16)(pInstance->SamplesToProcess - *pNumSamples); /* Update the sample count */
 
     /*
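
Note: the LVM_Buffers.c hunks above replace the hard-coded stereo stride (2 * samples) with NrChannels * NrFrames wherever delay data is copied or I/O pointers are advanced; the #define NrFrames / FrameCount lines only relabel the existing counters for the multichannel path. A tiny sketch of the generalized pointer arithmetic:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        const size_t nrChannels = 6, nrFrames = 32;
        std::vector<float> output(4 * nrChannels * nrFrames);  // room for several blocks

        float* pDest = output.data();
        // Advancing by frames * channels generalizes the old stereo-only
        // "pDest += 2 * NumSamples" stride.
        pDest += nrChannels * nrFrames;

        std::cout << "advanced by " << (pDest - output.data()) << " samples\n";
        return 0;
    }
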
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
index cfe53b8..7b85f23 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
@@ -28,6 +28,8 @@
 #include "LVM_Tables.h"
 #include "LVM_Private.h"
 
+#include <log/log.h>
+
 /****************************************************************************************/
 /*                                                                                      */
 /* FUNCTION:           LVM_SetControlParameters                                         */
@@ -75,12 +77,22 @@
         (pParams->SampleRate != LVM_FS_16000) && (pParams->SampleRate != LVM_FS_22050) && (pParams->SampleRate != LVM_FS_24000)       &&
         (pParams->SampleRate != LVM_FS_32000) && (pParams->SampleRate != LVM_FS_44100) && (pParams->SampleRate != LVM_FS_48000))      ||
 #endif
+#ifdef SUPPORT_MC
+        ((pParams->SourceFormat != LVM_STEREO) &&
+         (pParams->SourceFormat != LVM_MONOINSTEREO) &&
+         (pParams->SourceFormat != LVM_MONO) &&
+         (pParams->SourceFormat != LVM_MULTICHANNEL)) ||
+#else
         ((pParams->SourceFormat != LVM_STEREO) && (pParams->SourceFormat != LVM_MONOINSTEREO) && (pParams->SourceFormat != LVM_MONO)) ||
+#endif
         (pParams->SpeakerType > LVM_EX_HEADPHONES))
     {
         return (LVM_OUTOFRANGE);
     }
 
+#ifdef SUPPORT_MC
+    pInstance->Params.NrChannels = pParams->NrChannels;
+#endif
     /*
      * Cinema Sound parameters
      */
@@ -569,6 +581,10 @@
     } while ((pInstance->ControlPending != LVM_FALSE) &&
              (Count > 0));
 
+#ifdef SUPPORT_MC
+    pInstance->NrChannels = LocalParams.NrChannels;
+#endif
+
     /* Clear all internal data if format change*/
     if(LocalParams.SourceFormat != pInstance->Params.SourceFormat)
     {
@@ -719,6 +735,9 @@
         DBE_Params.HeadroomdB       = 0;
         DBE_Params.VolumeControl    = LVDBE_VOLUME_OFF;
         DBE_Params.VolumedB         = 0;
+#ifdef SUPPORT_MC
+        DBE_Params.NrChannels         = LocalParams.NrChannels;
+#endif
 
         /*
          * Make the changes
@@ -775,7 +794,9 @@
         {
             EQNB_Params.SourceFormat = LVEQNB_MONOINSTEREO;     /* Force to Mono-in-Stereo mode */
         }
-
+#ifdef SUPPORT_MC
+        EQNB_Params.NrChannels         = LocalParams.NrChannels;
+#endif
 
         /*
          * Set the control flag
@@ -849,7 +870,9 @@
         CS_Params.SampleRate  = LocalParams.SampleRate;
         CS_Params.ReverbLevel = LocalParams.VirtualizerReverbLevel;
         CS_Params.EffectLevel = LocalParams.CS_EffectLevel;
-
+#ifdef SUPPORT_MC
+        CS_Params.NrChannels  = LocalParams.NrChannels;
+#endif
 
         /*
          * Set the control flag
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
index 26c1c4f..ade329b 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
@@ -584,8 +584,11 @@
     /*
      * DC removal filter
      */
+#ifdef SUPPORT_MC
+    DC_Mc_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
+#else
     DC_2I_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
-
+#endif
 
     /*
      * Treble Enhancement
@@ -1039,7 +1042,11 @@
     LVM_SetHeadroomParams(hInstance, &HeadroomParams);
 
     /* DC removal filter */
+#ifdef SUPPORT_MC
+    DC_Mc_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
+#else
     DC_2I_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
+#endif
 
     return LVM_SUCCESS;
 }
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
index b453222..19d1532 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
@@ -144,12 +144,19 @@
     LVM_FLOAT               *pScratch;          /* Bundle scratch buffer */
 
     LVM_INT16               BufferState;        /* Buffer status */
+#ifdef SUPPORT_MC
+    LVM_FLOAT               InDelayBuffer[3 * LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE];
+#else
     LVM_FLOAT               InDelayBuffer[6 * MIN_INTERNAL_BLOCKSIZE]; /* Input buffer delay line, \
                                                                            left and right */
+#endif
     LVM_INT16               InDelaySamples;     /* Number of samples in the input delay buffer */
-
+#ifdef SUPPORT_MC
+    LVM_FLOAT               OutDelayBuffer[LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE];
+#else
     LVM_FLOAT               OutDelayBuffer[2 * MIN_INTERNAL_BLOCKSIZE]; /* Output buffer delay \
                                                                                       line */
+#endif
     LVM_INT16               OutDelaySamples;    /* Number of samples in the output delay buffer, \
                                                                              left and right */
     LVM_INT16               SamplesToOutput;    /* Samples to write to the output */
@@ -282,6 +289,10 @@
 
     LVM_INT16              NoSmoothVolume;      /* Enable or disable smooth volume changes*/
 
+#ifdef SUPPORT_MC
+    LVM_INT16              NrChannels;
+#endif
+
 } LVM_Instance_t;
 
 
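
The buffer declarations above keep the stereo sizing (6 and 2 blocks of MIN_INTERNAL_BLOCKSIZE) in the non-MC build and scale by LVM_MAX_CHANNELS otherwise. A quick standalone sketch of the resulting growth; the MIN_INTERNAL_BLOCKSIZE value below is illustrative only, and LVM_FLOAT is assumed here to be a 4-byte float.

/* Sketch of the delay-line growth implied by the declarations above.
 * MIN_INTERNAL_BLOCKSIZE is an illustrative value, not the library's. */
#include <stdio.h>

#define MIN_INTERNAL_BLOCKSIZE 16   /* illustrative only */
#define LVM_MAX_CHANNELS       8    /* FCC_8 in the SUPPORT_MC build */

int main(void)
{
    size_t in_stereo  = 6 * MIN_INTERNAL_BLOCKSIZE * sizeof(float);
    size_t in_mc      = 3 * LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE * sizeof(float);
    size_t out_stereo = 2 * MIN_INTERNAL_BLOCKSIZE * sizeof(float);
    size_t out_mc     = LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE * sizeof(float);

    printf("InDelayBuffer:  %zu -> %zu bytes\n", in_stereo, in_mc);
    printf("OutDelayBuffer: %zu -> %zu bytes\n", out_stereo, out_mc);
    return 0;
}
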
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
index 4a19a13..94ba278 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
@@ -65,6 +65,10 @@
     LVM_FLOAT           *pToProcess = (LVM_FLOAT *)pInData;
     LVM_FLOAT           *pProcessed = pOutData;
     LVM_ReturnStatus_en  Status;
+#ifdef SUPPORT_MC
+    LVM_INT32           NrChannels  = pInstance->NrChannels;
+#define NrFrames SampleCount  // alias for clarity
+#endif
 
     /*
      * Check if the number of samples is zero
@@ -112,6 +116,10 @@
     if (pInstance->ControlPending == LVM_TRUE)
     {
         Status = LVM_ApplyNewSettings(hInstance);
+#ifdef SUPPORT_MC
+        /* Update the local variable NrChannels from pInstance->NrChannels value */
+        NrChannels = pInstance->NrChannels;
+#endif
 
         if(Status != LVM_SUCCESS)
         {
@@ -130,6 +138,9 @@
                        (LVM_INT16)NumSamples);                 /* Number of input samples */
         pInput     = pOutData;
         pToProcess = pOutData;
+#ifdef SUPPORT_MC
+        NrChannels = 2;
+#endif
     }
 
 
@@ -153,7 +164,6 @@
          */
         if (SampleCount != 0)
         {
-
             /*
              * Apply ConcertSound if required
              */
@@ -171,10 +181,18 @@
              */
             if (pInstance->VC_Active!=0)
             {
+#ifdef SUPPORT_MC
+                LVC_MixSoft_Mc_D16C31_SAT(&pInstance->VC_Volume,
+                                       pToProcess,
+                                       pProcessed,
+                                       (LVM_INT16)(NrFrames),
+                                       NrChannels);
+#else
                 LVC_MixSoft_1St_D16C31_SAT(&pInstance->VC_Volume,
                                        pToProcess,
                                        pProcessed,
                                        (LVM_INT16)(2 * SampleCount));     /* Left and right*/
+#endif
                 pToProcess = pProcessed;
             }
 
@@ -221,20 +239,33 @@
                 /*
                  * Apply the filter
                  */
+#ifdef SUPPORT_MC
+                FO_Mc_D16F32C15_LShx_TRC_WRA_01(&pInstance->pTE_State->TrebleBoost_State,
+                                           pProcessed,
+                                           pProcessed,
+                                           (LVM_INT16)NrFrames,
+                                           (LVM_INT16)NrChannels);
+#else
                 FO_2I_D16F32C15_LShx_TRC_WRA_01(&pInstance->pTE_State->TrebleBoost_State,
                                            pProcessed,
                                            pProcessed,
                                            (LVM_INT16)SampleCount);
+#endif
 
             }
-
-            /*
-             * Volume balance
-             */
-            LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
-                                            pProcessed,
-                                            pProcessed,
-                                            SampleCount);
+#ifdef SUPPORT_MC
+            /* TODO - Multichannel support to be added */
+            if (NrChannels == 2)
+#endif
+            {
+                /*
+                 * Volume balance
+                 */
+                LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
+                                              pProcessed,
+                                              pProcessed,
+                                              SampleCount);
+            }
 
             /*
              * Perform Parametric Spectum Analysis
@@ -242,28 +273,39 @@
             if ((pInstance->Params.PSA_Enable == LVM_PSA_ON) &&
                                             (pInstance->InstParams.PSA_Included == LVM_PSA_ON))
             {
-                    From2iToMono_Float(pProcessed,
-                                       pInstance->pPSAInput,
-                                       (LVM_INT16)(SampleCount));
+#ifdef SUPPORT_MC
+                FromMcToMono_Float(pProcessed,
+                                   pInstance->pPSAInput,
+                                   (LVM_INT16)(NrFrames),
+                                   NrChannels);
+#else
+                From2iToMono_Float(pProcessed,
+                                   pInstance->pPSAInput,
+                                   (LVM_INT16)(SampleCount));
+#endif
 
-                    LVPSA_Process(pInstance->hPSAInstance,
-                            pInstance->pPSAInput,
-                            (LVM_UINT16)(SampleCount),
-                            AudioTime);
+                LVPSA_Process(pInstance->hPSAInstance,
+                        pInstance->pPSAInput,
+                        (LVM_UINT16)(SampleCount),
+                        AudioTime);
             }
 
-
             /*
              * DC removal
              */
+#ifdef SUPPORT_MC
+            DC_Mc_D16_TRC_WRA_01(&pInstance->DC_RemovalInstance,
+                                 pProcessed,
+                                 pProcessed,
+                                 (LVM_INT16)NrFrames,
+                                 NrChannels);
+#else
             DC_2I_D16_TRC_WRA_01(&pInstance->DC_RemovalInstance,
                                  pProcessed,
                                  pProcessed,
                                  (LVM_INT16)SampleCount);
-
-
+#endif
         }
-
         /*
          * Manage the output buffer
          */
@@ -497,4 +539,4 @@
 
     return(LVM_SUCCESS);
 }
-#endif
\ No newline at end of file
+#endif
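
The SUPPORT_MC paths above switch the bookkeeping from interleaved samples to frames (one sample per channel), so the legacy "2 * SampleCount" arithmetic becomes NrFrames * NrChannels. A minimal sketch of that conversion; the helper name is hypothetical and not part of the patch.

/* Illustrative sketch of the frames-vs-samples bookkeeping used above.
 * samples_per_block() is a hypothetical helper, not a library function. */
#include <stdio.h>

/* One frame carries one sample per channel, so a block of NrFrames frames
 * advances the interleaved pointers by NrFrames * NrChannels samples. */
static int samples_per_block(int nr_frames, int nr_channels)
{
    return nr_frames * nr_channels;
}

int main(void)
{
    int nr_frames = 128;

    /* Legacy stereo path: the advance was hard-coded as 2 * SampleCount. */
    printf("stereo advance: %d samples\n", samples_per_block(nr_frames, 2));

    /* Multichannel path (e.g. 7.1): the advance scales with NrChannels. */
    printf("7.1 advance:    %d samples\n", samples_per_block(nr_frames, 8));
    return 0;
}
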
diff --git a/media/libeffects/lvm/lib/Common/lib/AGC.h b/media/libeffects/lvm/lib/Common/lib/AGC.h
index 9a3d35d..06e742e 100644
--- a/media/libeffects/lvm/lib/Common/lib/AGC.h
+++ b/media/libeffects/lvm/lib/Common/lib/AGC.h
@@ -78,6 +78,15 @@
                                  const LVM_FLOAT            *pMonoSrc,      /* Mono source */
                                  LVM_FLOAT                  *pDst,          /* Stereo destination */
                                  LVM_UINT16                 n);             /* Number of samples */
+#ifdef SUPPORT_MC
+void AGC_MIX_VOL_Mc1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_FLOAT_t  *pInstance,  /* Instance pointer */
+                                 const LVM_FLOAT            *pStSrc,      /* Source */
+                                 const LVM_FLOAT            *pMonoSrc,    /* Mono source */
+                                 LVM_FLOAT                  *pDst,        /* Destination */
+                                 LVM_UINT16                 NrFrames,     /* Number of frames */
+                                 LVM_UINT16                 NrChannels);  /* Number of channels */
+#endif
+
 #else
 void AGC_MIX_VOL_2St1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_D32_t  *pInstance,     /* Instance pointer */
                                  const LVM_INT32            *pStSrc,        /* Stereo source */
diff --git a/media/libeffects/lvm/lib/Common/lib/BIQUAD.h b/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
index 3ee7f63..01539b2 100644
--- a/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
+++ b/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
@@ -30,8 +30,17 @@
 #ifdef BUILD_FLOAT
 typedef struct
 {
+#ifdef SUPPORT_MC
+    /* The memory region created by this structure instance is typecast
+     * into another structure containing a pointer and an array of filter
+     * coefficients. In one case this memory region is used for storing
+     * the DC component of the channels.
+     */
+    LVM_FLOAT *pStorage;
+    LVM_FLOAT Storage[LVM_MAX_CHANNELS];
+#else
     LVM_FLOAT Storage[6];
-
+#endif
 } Biquad_FLOAT_Instance_t;
 #else
 typedef struct
@@ -179,7 +188,12 @@
 
 typedef struct
 {
-    LVM_FLOAT Storage[ (2 * 2) ];  /* Two channels, two taps of size LVM_INT32 */
+#ifdef SUPPORT_MC
+    /* LVM_MAX_CHANNELS channels, two taps of size LVM_FLOAT */
+    LVM_FLOAT Storage[ (LVM_MAX_CHANNELS * 2) ];
+#else
+    LVM_FLOAT Storage[ (2 * 2) ];  /* Two channels, two taps of size LVM_FLOAT */
+#endif
 } Biquad_2I_Order1_FLOAT_Taps_t;
 #else
 typedef struct
@@ -197,12 +211,17 @@
 #ifdef BUILD_FLOAT
 typedef struct
 {
-    LVM_FLOAT Storage[ (1 * 4) ];  /* One channel, four taps of size LVM_INT32 */
+    LVM_FLOAT Storage[ (1 * 4) ];  /* One channel, four taps of size LVM_FLOAT */
 } Biquad_1I_Order2_FLOAT_Taps_t;
 
 typedef struct
 {
-    LVM_FLOAT Storage[ (2 * 4) ];  /* Two channels, four taps of size LVM_INT32 */
+#ifdef SUPPORT_MC
+    /* LVM_MAX_CHANNELS channels, four taps of size LVM_FLOAT */
+    LVM_FLOAT Storage[ (LVM_MAX_CHANNELS * 4) ];
+#else
+    LVM_FLOAT Storage[ (2 * 4) ];  /* Two channels, four taps of size LVM_FLOAT */
+#endif
 } Biquad_2I_Order2_FLOAT_Taps_t;
 #else
 typedef struct
@@ -366,6 +385,13 @@
                                             LVM_FLOAT                    *pDataIn,
                                             LVM_FLOAT                    *pDataOut,
                                             LVM_INT16                 NrSamples);
+#ifdef SUPPORT_MC
+void BQ_MC_D32F32C30_TRC_WRA_01 (           Biquad_FLOAT_Instance_t      *pInstance,
+                                            LVM_FLOAT                    *pDataIn,
+                                            LVM_FLOAT                    *pDataOut,
+                                            LVM_INT16                    NrFrames,
+                                            LVM_INT16                    NrChannels);
+#endif
 #else
 void BQ_2I_D32F32Cll_TRC_WRA_01_Init (      Biquad_Instance_t       *pInstance,
                                             Biquad_2I_Order2_Taps_t *pTaps,
@@ -434,6 +460,13 @@
                                  LVM_FLOAT                     *pDataIn,
                                  LVM_FLOAT                     *pDataOut,
                                  LVM_INT16                     NrSamples);
+#ifdef SUPPORT_MC
+void FO_Mc_D16F32C15_LShx_TRC_WRA_01(Biquad_FLOAT_Instance_t  *pInstance,
+                                     LVM_FLOAT                *pDataIn,
+                                     LVM_FLOAT                *pDataOut,
+                                     LVM_INT16                NrFrames,
+                                     LVM_INT16                NrChannels);
+#endif
 #else
 void FO_1I_D32F32Cll_TRC_WRA_01_Init(       Biquad_Instance_t       *pInstance,
                                             Biquad_1I_Order1_Taps_t *pTaps,
@@ -527,6 +560,13 @@
                                     LVM_FLOAT               *pDataIn,
                                     LVM_FLOAT               *pDataOut,
                                     LVM_INT16               NrSamples);
+#ifdef SUPPORT_MC
+void PK_Mc_D32F32C14G11_TRC_WRA_01(Biquad_FLOAT_Instance_t       *pInstance,
+                                   LVM_FLOAT               *pDataIn,
+                                   LVM_FLOAT               *pDataOut,
+                                   LVM_INT16               NrFrames,
+                                   LVM_INT16               NrChannels);
+#endif
 #else
 void PK_2I_D32F32C14G11_TRC_WRA_01 (        Biquad_Instance_t       *pInstance,
                                             LVM_INT32                    *pDataIn,
@@ -540,12 +580,22 @@
 
 /*** 16 bit data path STEREO ******************************************************/
 #ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void DC_Mc_D16_TRC_WRA_01_Init     (        Biquad_FLOAT_Instance_t       *pInstance);
+
+void DC_Mc_D16_TRC_WRA_01          (        Biquad_FLOAT_Instance_t       *pInstance,
+                                            LVM_FLOAT               *pDataIn,
+                                            LVM_FLOAT               *pDataOut,
+                                            LVM_INT16               NrFrames,
+                                            LVM_INT16               NrChannels);
+#else
 void DC_2I_D16_TRC_WRA_01_Init     (        Biquad_FLOAT_Instance_t       *pInstance);
 
 void DC_2I_D16_TRC_WRA_01          (        Biquad_FLOAT_Instance_t       *pInstance,
                                             LVM_FLOAT               *pDataIn,
                                             LVM_FLOAT               *pDataOut,
                                             LVM_INT16               NrSamples);
+#endif
 #else
 void DC_2I_D16_TRC_WRA_01_Init     (        Biquad_Instance_t       *pInstance);
 
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h b/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h
index 81e288c..a76354d 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h
@@ -44,7 +44,15 @@
 
 typedef struct
 {
-    LVM_INT32 Storage[6];
+    /*
+     * The memory area created using this structure is internally
+     * typecast to LVM_Timer_Instance_Private_t and used.
+     * The LVM_Timer_Instance_Private_t structure has 3 pointer type elements,
+     * 2 elements of type LVM_INT32 and one element of type LVM_INT16.
+     * In order to cater to both 32 and 64 bit builds, the Storage array should
+     * have a minimum of 9 elements of type LVM_INT32.
+     */
+    LVM_INT32 Storage[9];
 
 } LVM_Timer_Instance_t;
 
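
The comment above sizes Storage[] so that the opaque LVM_Timer_Instance_t can hold LVM_Timer_Instance_Private_t on both 32-bit and 64-bit builds. A quick arithmetic sketch of that rationale; only the element counts (three pointers, two LVM_INT32, one LVM_INT16) are taken from the comment, nothing else about the private layout is assumed.

/* Arithmetic behind the Storage[9] sizing in the comment above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Member sizes on the current platform; on a 64-bit build the three
     * pointers are 8 bytes each, which is what drives the 9-element minimum. */
    size_t members = 3 * sizeof(void *) + 2 * sizeof(int32_t) + sizeof(int16_t);
    size_t storage = 9 * sizeof(int32_t);   /* Storage[9] from the header above */

    printf("private members need %zu bytes, Storage[9] provides %zu bytes\n",
           members, storage);
    return 0;
}
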
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index ea16072..303b62d 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -122,6 +122,12 @@
 
 #endif // NATIVE_FLOAT_BUFFER
 
+#ifdef SUPPORT_MC
+#define LVM_MAX_CHANNELS 8 // FCC_8
+#else
+#define LVM_MAX_CHANNELS 2 // FCC_2
+#endif
+
 /****************************************************************************************/
 /*                                                                                      */
 /*  Standard Enumerated types                                                           */
@@ -143,6 +149,9 @@
     LVM_STEREO          = 0,
     LVM_MONOINSTEREO    = 1,
     LVM_MONO            = 2,
+#ifdef SUPPORT_MC
+    LVM_MULTICHANNEL    = 3,
+#endif
     LVM_SOURCE_DUMMY    = LVM_MAXENUM
 } LVM_Format_en;
 
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 0ba20a3..7468a90 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -47,6 +47,16 @@
 void Copy_Float(                 const LVM_FLOAT *src,
                                  LVM_FLOAT *dst,
                                  LVM_INT16 n );
+#ifdef SUPPORT_MC
+void Copy_Float_Mc_Stereo(       const LVM_FLOAT *src,
+                                 LVM_FLOAT *dst,
+                                 LVM_INT16 NrFrames,
+                                 LVM_INT32 NrChannels);
+void Copy_Float_Stereo_Mc(       const LVM_FLOAT *src,
+                                 LVM_FLOAT *dst,
+                                 LVM_INT16 NrFrames,
+                                 LVM_INT32 NrChannels);
+#endif
 #else
 void Copy_16(                 const LVM_INT16 *src,
                                     LVM_INT16 *dst,
@@ -181,6 +191,12 @@
 void From2iToMono_Float(         const LVM_FLOAT  *src,
                                  LVM_FLOAT  *dst,
                                  LVM_INT16 n);
+#ifdef SUPPORT_MC
+void FromMcToMono_Float(const LVM_FLOAT *src,
+                        LVM_FLOAT *dst,
+                        LVM_INT16 NrFrames,
+                        LVM_INT16 NrChannels);
+#endif
 #else
 void From2iToMono_32(         const LVM_INT32  *src,
                                     LVM_INT32  *dst,
diff --git a/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c b/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c
index fa9f01f..5c8655f 100644
--- a/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c
+++ b/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c
@@ -305,4 +305,150 @@
 
     return;
 }
+#ifdef SUPPORT_MC
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                  AGC_MIX_VOL_Mc1Mon_D32_WRA                                */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*    Apply AGC and mix signals                                                         */
+/*                                                                                      */
+/*                                                                                      */
+/*  McSrc   ------------------|                                                         */
+/*                            |                                                         */
+/*              ______       _|_        ________                                        */
+/*             |      |     |   |      |        |                                       */
+/*  MonoSrc -->| AGC  |---->| + |----->| Volume |------------------------------+--->    */
+/*             | Gain |     |___|      | Gain   |                              |        */
+/*             |______|                |________|                              |        */
+/*                /|\                               __________     ________    |        */
+/*                 |                               |          |   |        |   |        */
+/*                 |-------------------------------| AGC Gain |<--| Peak   |<--|        */
+/*                                                 | Update   |   | Detect |            */
+/*                                                 |__________|   |________|            */
+/*                                                                                      */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  pInstance               Instance pointer                                            */
+/*  pMcSrc                  Multichannel source                                         */
+/*  pMonoSrc                Mono band pass source                                       */
+/*  pDst                    Multichannel destination                                    */
+/*  NrFrames                Number of frames                                            */
+/*  NrChannels              Number of channels                                          */
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/*  Void                                                                                */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*                                                                                      */
+/****************************************************************************************/
+void AGC_MIX_VOL_Mc1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_FLOAT_t  *pInstance,
+                                 const LVM_FLOAT            *pMcSrc,
+                                 const LVM_FLOAT            *pMonoSrc,
+                                 LVM_FLOAT                  *pDst,
+                                 LVM_UINT16                 NrFrames,
+                                 LVM_UINT16                 NrChannels)
+{
+
+    /*
+     * General variables
+     */
+    LVM_UINT16      i, jj;                                      /* Frame and channel indices */
+    LVM_FLOAT       SampleVal;                                  /* Sample value */
+    LVM_FLOAT       Mono;                                       /* Mono sample */
+    LVM_FLOAT       AbsPeak;                                    /* Absolute peak signal */
+    LVM_FLOAT       AGC_Mult;                                   /* Short AGC gain */
+    LVM_FLOAT       Vol_Mult;                                   /* Short volume */
+
+
+    /*
+     * Instance control variables
+     */
+    LVM_FLOAT      AGC_Gain      = pInstance->AGC_Gain;         /* Get the current AGC gain */
+    LVM_FLOAT      AGC_MaxGain   = pInstance->AGC_MaxGain;      /* Get maximum AGC gain */
+    LVM_FLOAT      AGC_Attack    = pInstance->AGC_Attack;       /* Attack scaler */
+    /* Decay scaler */
+    LVM_FLOAT      AGC_Decay     = (pInstance->AGC_Decay * (1 << (DECAY_SHIFT)));
+    LVM_FLOAT      AGC_Target    = pInstance->AGC_Target;       /* Get the target level */
+    LVM_FLOAT      Vol_Current   = pInstance->Volume;           /* Actual volume setting */
+    LVM_FLOAT      Vol_Target    = pInstance->Target;           /* Target volume setting */
+    LVM_FLOAT      Vol_TC        = pInstance->VolumeTC;         /* Time constant */
+
+
+    /*
+     * Process on a sample by sample basis
+     */
+    for (i = 0; i < NrFrames; i++)                                  /* For each frame */
+    {
+
+        /*
+         * Get the scalers
+         */
+        AGC_Mult    = (LVM_FLOAT)(AGC_Gain);              /* Get the AGC gain */
+        Vol_Mult    = (LVM_FLOAT)(Vol_Current);           /* Get the volume gain */
+
+        AbsPeak = 0.0f;
+        /*
+         * Get the input samples
+         */
+        for (jj = 0; jj < NrChannels; jj++)
+        {
+            SampleVal  = *pMcSrc++;                       /* Get the sample value of channel jj */
+            Mono       = *pMonoSrc;                       /* Get the mono sample */
+
+            /*
+             * Apply the AGC gain to the mono input and mix with the input signal
+             */
+            SampleVal  += (Mono * AGC_Mult);                        /* Mix in the mono signal */
+
+            /*
+             * Apply the volume and write to the output stream
+             */
+            SampleVal  = SampleVal  * Vol_Mult;
+
+            *pDst++ = SampleVal;                                         /* Save the results */
+
+            /*
+             * Update the AGC gain
+             */
+            AbsPeak = Abs_Float(SampleVal) > AbsPeak ? Abs_Float(SampleVal) : AbsPeak;
+        }
+        if (AbsPeak > AGC_Target)
+        {
+            /*
+             * The signal is too large so decrease the gain
+             */
+            AGC_Gain = AGC_Gain * AGC_Attack;
+        }
+        else
+        {
+            /*
+             * The signal is too small so increase the gain
+             */
+            if (AGC_Gain > AGC_MaxGain)
+            {
+                AGC_Gain -= (AGC_Decay);
+            }
+            else
+            {
+                AGC_Gain += (AGC_Decay);
+            }
+        }
+        pMonoSrc++;
+        /*
+         * Update the gain
+         */
+        Vol_Current +=  (Vol_Target - Vol_Current) * ((LVM_FLOAT)Vol_TC / VOL_TC_FLOAT);
+    }
+
+
+    /*
+     * Update the parameters
+     */
+    pInstance->Volume = Vol_Current;                            /* Actual volume setting */
+    pInstance->AGC_Gain = AGC_Gain;
+
+    return;
+}
+#endif /*SUPPORT_MC*/
 #endif /*BUILD_FLOAT*/
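
The gain update in AGC_MIX_VOL_Mc1Mon_D32_WRA above is evaluated once per frame: the peak across all channels is compared against AGC_Target, the gain is scaled by AGC_Attack when the peak is too large, and otherwise it recovers by AGC_Decay (or backs off if it has exceeded AGC_MaxGain). A reduced standalone sketch of just that update; the constants are illustrative, not the library's tuned values.

/* Reduced sketch of the per-frame AGC gain update used above.
 * All constants are illustrative only. */
#include <stdio.h>
#include <math.h>

int main(void)
{
    float agc_gain   = 1.0f;
    float agc_max    = 2.0f;    /* illustrative AGC_MaxGain */
    float agc_target = 0.5f;    /* illustrative AGC_Target  */
    float agc_attack = 0.9f;    /* < 1.0: shrinks the gain  */
    float agc_decay  = 0.01f;   /* small additive recovery step */
    float peaks[]    = { 0.8f, 0.7f, 0.3f, 0.2f, 0.2f, 0.2f };
    int   i;

    for (i = 0; i < 6; i++) {
        float abs_peak = fabsf(peaks[i]);   /* per-frame peak across channels */
        if (abs_peak > agc_target) {
            agc_gain *= agc_attack;         /* signal too large: reduce gain  */
        } else if (agc_gain > agc_max) {
            agc_gain -= agc_decay;          /* above the ceiling: back off    */
        } else {
            agc_gain += agc_decay;          /* signal small: recover slowly   */
        }
        printf("frame %d: peak %.2f -> gain %.4f\n", i, abs_peak, agc_gain);
    }
    return 0;
}
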
diff --git a/media/libeffects/lvm/lib/Common/src/Add2_Sat_32x32.c b/media/libeffects/lvm/lib/Common/src/Add2_Sat_32x32.c
index e3edccc..66d6adb 100644
--- a/media/libeffects/lvm/lib/Common/src/Add2_Sat_32x32.c
+++ b/media/libeffects/lvm/lib/Common/src/Add2_Sat_32x32.c
@@ -43,11 +43,11 @@
         {
             if(a<0)
             {
-                c=0x80000000l;
+                c=0x80000000L;
             }
             else
             {
-                c=0x7FFFFFFFl;
+                c=0x7FFFFFFFL;
             }
         }
 
diff --git a/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c
index 960de79..d63365c 100644
--- a/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c
@@ -123,6 +123,87 @@
         }
 
     }
+
+#ifdef SUPPORT_MC
+/**************************************************************************
+ ASSUMPTIONS:
+ COEFS-
+ pBiquadState->coefs[0] is A2, pBiquadState->coefs[1] is A1
+ pBiquadState->coefs[2] is A0, pBiquadState->coefs[3] is -B2
+ pBiquadState->coefs[4] is -B1
+
+ DELAYS-
+ pBiquadState->pDelays[0] to
+ pBiquadState->pDelays[NrChannels - 1] is x(n-1) for all NrChannels
+
+ pBiquadState->pDelays[NrChannels] to
+ pBiquadState->pDelays[2*NrChannels - 1] is x(n-2) for all NrChannels
+
+ pBiquadState->pDelays[2*NrChannels] to
+ pBiquadState->pDelays[3*NrChannels - 1] is y(n-1) for all NrChannels
+
+ pBiquadState->pDelays[3*NrChannels] to
+ pBiquadState->pDelays[4*NrChannels - 1] is y(n-2) for all NrChannels
+***************************************************************************/
+void BQ_MC_D32F32C30_TRC_WRA_01 (           Biquad_FLOAT_Instance_t      *pInstance,
+                                            LVM_FLOAT                    *pDataIn,
+                                            LVM_FLOAT                    *pDataOut,
+                                            LVM_INT16                    NrFrames,
+                                            LVM_INT16                    NrChannels)
+
+
+    {
+        LVM_FLOAT yn, temp;
+        LVM_INT16 ii, jj;
+        PFilter_State_FLOAT pBiquadState = (PFilter_State_FLOAT) pInstance;
+
+         for (ii = NrFrames; ii != 0; ii--)
+         {
+            /**************************************************************************
+                            PROCESSING CHANNEL-WISE
+            ***************************************************************************/
+            for (jj = 0; jj < NrChannels; jj++)
+            {
+                /* yn= (A2  * x(n-2)) */
+                yn = pBiquadState->coefs[0] * pBiquadState->pDelays[NrChannels + jj];
+
+                /* yn+= (A1  * x(n-1)) */
+                temp = pBiquadState->coefs[1] * pBiquadState->pDelays[jj];
+                yn += temp;
+
+                /* yn+= (A0  * x(n)) */
+                temp = pBiquadState->coefs[2] * (*pDataIn);
+                yn += temp;
+
+                 /* yn+= (-B2  * y(n-2)) */
+                temp = pBiquadState->coefs[3] * pBiquadState->pDelays[NrChannels*3 + jj];
+                yn += temp;
+
+                /* yn+= (-B1  * y(n-1)) */
+                temp = pBiquadState->coefs[4] * pBiquadState->pDelays[NrChannels*2 + jj];
+                yn += temp;
+
+                /**************************************************************************
+                                UPDATING THE DELAYS
+                ***************************************************************************/
+                pBiquadState->pDelays[NrChannels * 3 + jj] =
+                    pBiquadState->pDelays[NrChannels * 2 + jj]; /* y(n-2)=y(n-1)*/
+                pBiquadState->pDelays[NrChannels * 1 + jj] =
+                    pBiquadState->pDelays[jj]; /* x(n-2)=x(n-1)*/
+                pBiquadState->pDelays[NrChannels * 2 + jj] = (LVM_FLOAT)yn; /* Update y(n-1)*/
+                pBiquadState->pDelays[jj] = (*pDataIn); /* Update x(n-1)*/
+                pDataIn++;
+                /**************************************************************************
+                                WRITING THE OUTPUT
+                ***************************************************************************/
+                *pDataOut = (LVM_FLOAT)yn; /* Write channel jj output */
+                pDataOut++;
+            }
+        }
+
+    }
+#endif /*SUPPORT_MC*/
+
 #else
 void BQ_2I_D32F32C30_TRC_WRA_01 (           Biquad_Instance_t       *pInstance,
                                             LVM_INT32                    *pDataIn,
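
The ASSUMPTIONS block above lays the multichannel delay line out as four contiguous blocks of NrChannels entries: x(n-1), x(n-2), y(n-1) and y(n-2). A small sketch of index helpers that encode that layout; the helper names are hypothetical, only the layout comes from the comment.

/* Illustrative index helpers for the pDelays[] layout documented above.
 * Names are hypothetical. */
#include <stdio.h>

static int idx_x1(int nr_channels, int ch) { return 0 * nr_channels + ch; } /* x(n-1) */
static int idx_x2(int nr_channels, int ch) { return 1 * nr_channels + ch; } /* x(n-2) */
static int idx_y1(int nr_channels, int ch) { return 2 * nr_channels + ch; } /* y(n-1) */
static int idx_y2(int nr_channels, int ch) { return 3 * nr_channels + ch; } /* y(n-2) */

int main(void)
{
    int nr_channels = 8;   /* LVM_MAX_CHANNELS in the SUPPORT_MC build */
    int ch = 5;

    printf("channel %d: x(n-1)@%d x(n-2)@%d y(n-1)@%d y(n-2)@%d (total %d taps)\n",
           ch,
           idx_x1(nr_channels, ch), idx_x2(nr_channels, ch),
           idx_y1(nr_channels, ch), idx_y2(nr_channels, ch),
           4 * nr_channels);
    return 0;
}
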
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index e489031..1f9f659 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -84,5 +84,65 @@
 
     return;
 }
+#ifdef SUPPORT_MC
+// Extract out the stereo channel pair from multichannel source.
+void Copy_Float_Mc_Stereo(const LVM_FLOAT *src,
+                 LVM_FLOAT *dst,
+                 LVM_INT16 NrFrames, /* Number of frames */
+                 LVM_INT32 NrChannels)
+{
+    LVM_INT16 ii;
+
+    if (NrChannels >= 2)
+    {
+        for (ii = NrFrames; ii != 0; ii--)
+        {
+            dst[0] = src[0];
+            dst[1] = src[1];
+            dst += 2;
+            src += NrChannels;
+        }
+    }
+    else if (NrChannels == 1)
+    {   // not expected to occur, provided for completeness.
+        src += (NrFrames - 1);
+        dst += 2 * (NrFrames - 1);
+        for (ii = NrFrames; ii != 0; ii--)
+        {
+            dst[0] = src[0];
+            dst[1] = src[0];
+            dst -= 2;
+            src --;
+        }
+    }
+}
+
+// Merge a multichannel source with stereo contained in dst, to dst.
+void Copy_Float_Stereo_Mc(const LVM_FLOAT *src,
+                 LVM_FLOAT *dst,
+                 LVM_INT16 NrFrames, /* Number of frames*/
+                 LVM_INT32 NrChannels)
+{
+    LVM_INT16 ii, jj;
+    LVM_FLOAT *src_st = dst + 2 * (NrFrames - 1);
+
+    // repack dst which carries stereo information
+    // together with the upper channels of src.
+    dst += NrChannels * (NrFrames - 1);
+    src += NrChannels * (NrFrames - 1);
+    for (ii = NrFrames; ii != 0; ii--)
+    {
+        dst[0] = src_st[0];
+        dst[1] = src_st[1];
+        for (jj = 2; jj < NrChannels; jj++)
+        {
+            dst[jj] = src[jj];
+        }
+        dst    -= NrChannels;
+        src    -= NrChannels;
+        src_st -= 2;
+    }
+}
+#endif
 #endif
 /**********************************************************************************/
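
Copy_Float_Mc_Stereo and Copy_Float_Stereo_Mc above form a pair: the first pulls the stereo channel pair out of an interleaved multichannel buffer, the second writes a stereo result back in front of the untouched upper channels, walking backwards so it can expand in place. A simplified, self-contained round-trip sketch of that pattern; it illustrates the data movement only and is not the library code.

/* Simplified illustration of the extract/process/merge round trip that the
 * two helpers above support. This sketch is not the library implementation. */
#include <stdio.h>

#define NR_CHANNELS 4
#define NR_FRAMES   3

int main(void)
{
    float mc[NR_FRAMES * NR_CHANNELS];   /* interleaved multichannel block */
    float st[NR_FRAMES * 2];             /* working stereo buffer          */
    int f, ch;

    for (f = 0; f < NR_FRAMES; f++)
        for (ch = 0; ch < NR_CHANNELS; ch++)
            mc[f * NR_CHANNELS + ch] = f + ch * 0.1f;

    /* Extract the stereo pair (the job of Copy_Float_Mc_Stereo). */
    for (f = 0; f < NR_FRAMES; f++) {
        st[2 * f]     = mc[f * NR_CHANNELS];
        st[2 * f + 1] = mc[f * NR_CHANNELS + 1];
    }

    /* "Process" the stereo pair; a plain gain stands in for a stereo stage. */
    for (f = 0; f < 2 * NR_FRAMES; f++)
        st[f] *= 0.5f;

    /* Merge back (the job of Copy_Float_Stereo_Mc): processed stereo in the
     * first two slots, the remaining channels kept from the source. */
    for (f = 0; f < NR_FRAMES; f++) {
        mc[f * NR_CHANNELS]     = st[2 * f];
        mc[f * NR_CHANNELS + 1] = st[2 * f + 1];
        /* channels 2..NR_CHANNELS-1 are left untouched */
    }

    for (f = 0; f < NR_FRAMES; f++)
        printf("frame %d: %.2f %.2f %.2f %.2f\n", f,
               mc[f * NR_CHANNELS],     mc[f * NR_CHANNELS + 1],
               mc[f * NR_CHANNELS + 2], mc[f * NR_CHANNELS + 3]);
    return 0;
}
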
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c
index d261c9e..13fac5e 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c
@@ -33,7 +33,7 @@
         RightDC = pBiquadState->RightDC;
         for(j = NrSamples-1; j >= 0; j--)
         {
-            /* Subtract DC an saturate */
+            /* Subtract DC and saturate */
             Diff =* (pDataIn++) - (LeftDC);
             if (Diff > 1.0f) {
                 Diff = 1.0f; }
@@ -64,6 +64,58 @@
 
 
     }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       DC_Mc_D16_TRC_WRA_01
+ *
+ * DESCRIPTION:
+ *  DC removal from all channels of a multichannel input
+ *
+ * PARAMETERS:
+ *  pInstance      Instance pointer
+ *  pDataIn        Input/Source
+ *  pDataOut       Output/Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void DC_Mc_D16_TRC_WRA_01(Biquad_FLOAT_Instance_t       *pInstance,
+                          LVM_FLOAT               *pDataIn,
+                          LVM_FLOAT               *pDataOut,
+                          LVM_INT16               NrFrames,
+                          LVM_INT16               NrChannels)
+    {
+        LVM_FLOAT *ChDC;
+        LVM_FLOAT Diff;
+        LVM_INT32 j;
+        LVM_INT32 i;
+        PFilter_FLOAT_State_Mc pBiquadState = (PFilter_FLOAT_State_Mc) pInstance;
+
+        ChDC = &pBiquadState->ChDC[0];
+        for (j = NrFrames - 1; j >= 0; j--)
+        {
+            /* Subtract DC and saturate */
+            for (i = NrChannels - 1; i >= 0; i--)
+            {
+                Diff = *(pDataIn++) - (ChDC[i]);
+                if (Diff > 1.0f) {
+                    Diff = 1.0f;
+                } else if (Diff < -1.0f) {
+                    Diff = -1.0f; }
+                *(pDataOut++) = (LVM_FLOAT)Diff;
+                if (Diff < 0) {
+                    ChDC[i] -= DC_FLOAT_STEP;
+                } else {
+                    ChDC[i] += DC_FLOAT_STEP; }
+            }
+
+        }
+
+    }
+#endif
 #else
 void DC_2I_D16_TRC_WRA_01( Biquad_Instance_t       *pInstance,
                            LVM_INT16               *pDataIn,
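
DC_Mc_D16_TRC_WRA_01 above keeps one running DC estimate per channel, subtracts it from each output sample and nudges it by DC_FLOAT_STEP toward the signal every frame. A reduced single-channel sketch showing the estimate converging on a constant offset; the step value is illustrative, not the library's DC_FLOAT_STEP.

/* Reduced single-channel sketch of the per-channel DC tracking used above.
 * STEP is an illustrative value. */
#include <stdio.h>

#define STEP 0.001f

int main(void)
{
    float dc = 0.0f;    /* running DC estimate (one per channel in the MC code) */
    float in = 0.25f;   /* constant input offset to be removed */
    int   n;

    for (n = 0; n < 300; n++) {
        float out = in - dc;             /* subtract the current estimate */
        if (out > 1.0f)  out = 1.0f;     /* saturate, as the library does */
        if (out < -1.0f) out = -1.0f;
        dc += (out < 0) ? -STEP : STEP;  /* nudge the estimate toward the signal */
        if (n % 100 == 99)
            printf("n=%d dc=%.3f out=%.3f\n", n, dc, out);
    }
    return 0;
}
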
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c
index 4f4fcd8..0f941a0 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c
@@ -24,6 +24,17 @@
     pBiquadState->LeftDC        = 0.0f;
     pBiquadState->RightDC       = 0.0f;
 }
+#ifdef SUPPORT_MC
+void  DC_Mc_D16_TRC_WRA_01_Init(Biquad_FLOAT_Instance_t   *pInstance)
+{
+    PFilter_FLOAT_State_Mc pBiquadState  = (PFilter_FLOAT_State_Mc) pInstance;
+    LVM_INT32 i;
+    for (i = 0; i < LVM_MAX_CHANNELS; i++)
+    {
+        pBiquadState->ChDC[i] = 0.0f;
+    }
+}
+#endif
 #else
 void  DC_2I_D16_TRC_WRA_01_Init(Biquad_Instance_t   *pInstance)
 {
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
index fa6b729..db3a6d3 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
@@ -34,6 +34,13 @@
     LVM_FLOAT  RightDC;    /* RightDC  */
 }Filter_FLOAT_State;
 typedef Filter_FLOAT_State * PFilter_FLOAT_State ;
+#ifdef SUPPORT_MC
+typedef struct _Filter_FLOAT_State_Mc_
+{
+    LVM_FLOAT  ChDC[LVM_MAX_CHANNELS];     /* ChannelDC  */
+} Filter_FLOAT_State_Mc;
+typedef Filter_FLOAT_State_Mc * PFilter_FLOAT_State_Mc ;
+#endif
 #else
 typedef struct _Filter_State_
 {
diff --git a/media/libeffects/lvm/lib/Common/src/DelayAllPass_Sat_32x16To32.c b/media/libeffects/lvm/lib/Common/src/DelayAllPass_Sat_32x16To32.c
index 2e20d79..b04e98e 100644
--- a/media/libeffects/lvm/lib/Common/src/DelayAllPass_Sat_32x16To32.c
+++ b/media/libeffects/lvm/lib/Common/src/DelayAllPass_Sat_32x16To32.c
@@ -53,11 +53,11 @@
         {
             if(a < 0)
             {
-                c = 0x80000000l;
+                c = 0x80000000L;
             }
             else
             {
-                c = 0x7FFFFFFFl;
+                c = 0x7FFFFFFFL;
             }
         }
         *dst = c;
@@ -72,11 +72,11 @@
         {
             if(a < 0)
             {
-                c = 0x80000000l;
+                c = 0x80000000L;
             }
             else
             {
-                c = 0x7FFFFFFFl;
+                c = 0x7FFFFFFFL;
             }
         }
         delay[AllPassOffset] = c;
diff --git a/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c
index 192927c..2a50f18 100644
--- a/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c
@@ -117,6 +117,93 @@
         }
 
     }
+#ifdef SUPPORT_MC
+/**************************************************************************
+ASSUMPTIONS:
+COEFS-
+pBiquadState->coefs[0] is A1,
+pBiquadState->coefs[1] is A0,
+pBiquadState->coefs[2] is -B1,
+DELAYS-
+pBiquadState->pDelays[2*ch + 0] is x(n-1) of channel 'ch'
+pBiquadState->pDelays[2*ch + 1] is y(n-1) of channel 'ch'
+The index 'ch' runs from 0 to (NrChannels - 1)
+
+PARAMETERS:
+ pInstance        Instance pointer
+ pDataIn          Input/Source
+ pDataOut         Output/Destination
+ NrFrames         Number of frames
+ NrChannels       Number of channels
+
+RETURNS:
+ void
+***************************************************************************/
+void FO_Mc_D16F32C15_LShx_TRC_WRA_01(Biquad_FLOAT_Instance_t *pInstance,
+                                     LVM_FLOAT               *pDataIn,
+                                     LVM_FLOAT               *pDataOut,
+                                     LVM_INT16               NrFrames,
+                                     LVM_INT16               NrChannels)
+    {
+        LVM_FLOAT   yn;
+        LVM_FLOAT   Temp;
+        LVM_INT16   ii;
+        LVM_INT16   ch;
+        PFilter_Float_State pBiquadState = (PFilter_Float_State) pInstance;
+
+        LVM_FLOAT   *pDelays = pBiquadState->pDelays;
+        LVM_FLOAT   *pCoefs  = &pBiquadState->coefs[0];
+        LVM_FLOAT   A0 = pCoefs[1];
+        LVM_FLOAT   A1 = pCoefs[0];
+        LVM_FLOAT   B1 = pCoefs[2];
+
+        for (ii = NrFrames; ii != 0; ii--)
+        {
+
+            /**************************************************************************
+                            PROCESSING OF THE CHANNELS
+            ***************************************************************************/
+            for (ch = 0; ch < NrChannels; ch++)
+            {
+                // yn =A1  * x(n-1)
+                yn = (LVM_FLOAT)A1 * pDelays[0];
+
+                // yn+=A0  * x(n)
+                yn += (LVM_FLOAT)A0 * (*pDataIn);
+
+                // yn +=  (-B1  * y(n-1))
+                Temp = B1 * pDelays[1];
+                yn += Temp;
+
+
+                /**************************************************************************
+                                UPDATING THE DELAYS
+                ***************************************************************************/
+                pDelays[1] = yn; // Update y(n-1)
+                pDelays[0] = (*pDataIn++); // Update x(n-1)
+
+                /**************************************************************************
+                                WRITING THE OUTPUT
+                ***************************************************************************/
+
+                /*Saturate results*/
+                if (yn > 1.0f)
+                {
+                    yn = 1.0f;
+                } else if (yn < -1.0f) {
+                    yn = -1.0f;
+                }
+
+                *pDataOut++ = (LVM_FLOAT)yn;
+                pDelays += 2;
+            }
+            pDelays -= NrChannels * 2;
+        }
+    }
+#endif
 #else
 void FO_2I_D16F32C15_LShx_TRC_WRA_01(Biquad_Instance_t       *pInstance,
                                      LVM_INT16               *pDataIn,
diff --git a/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c b/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c
index ac1eea8..d02af88 100644
--- a/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c
+++ b/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c
@@ -68,5 +68,47 @@
 
     return;
 }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       FromMcToMono_Float
+ *
+ * DESCRIPTION:
+ *  Creates a mono stream from a multichannel input by taking the average of
+ *  the sample values of all channels
+ *
+ * PARAMETERS:
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void FromMcToMono_Float(const LVM_FLOAT *src,
+                        LVM_FLOAT *dst,
+                        LVM_INT16 NrFrames,
+                        LVM_INT16 NrChannels)
+{
+    LVM_INT16 ii, jj;
+    LVM_FLOAT Temp;
+
+    for (ii = NrFrames; ii != 0; ii--)
+    {
+        Temp = 0.0f;
+        for (jj = NrChannels; jj !=0; jj--)
+        {
+            Temp += (*src);
+            src++;
+        }
+        *dst = Temp / NrChannels;
+        dst++;
+    }
+
+    return;
+}
+#endif
+
 #endif
 /**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c
index d2694cc..419c7c5 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c
@@ -26,10 +26,10 @@
    FUNCTION LVCore_MIXSOFT_1ST_D16C31_WRA
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_Core_MixInSoft_D16C31_SAT( LVMixer3_FLOAT_st *ptrInstance,
-                                    const LVM_FLOAT     *src,
-                                          LVM_FLOAT     *dst,
-                                          LVM_INT16     n)
+void LVC_Core_MixInSoft_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
+                                   const LVM_FLOAT   *src,
+                                         LVM_FLOAT   *dst,
+                                         LVM_INT16   n)
 {
 
     LVM_INT16   OutLoop;
@@ -114,6 +114,139 @@
     }
     pInstance->Current = Current;
 }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_Core_MixInSoft_Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input.
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_Core_MixInSoft_Mc_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
+                                      const LVM_FLOAT   *src,
+                                            LVM_FLOAT   *dst,
+                                            LVM_INT16   NrFrames,
+                                            LVM_INT16   NrChannels)
+{
+
+    LVM_INT16   OutLoop;
+    LVM_INT16   InLoop;
+    LVM_INT32   ii, jj;
+    Mix_Private_FLOAT_st  *pInstance = (Mix_Private_FLOAT_st *)(ptrInstance->PrivateParams);
+    LVM_FLOAT   Delta = pInstance->Delta;
+    LVM_FLOAT   Current = pInstance->Current;
+    LVM_FLOAT   Target = pInstance->Target;
+    LVM_FLOAT   Temp;
+
+    /*
+     * The same operation is performed on consecutive frames, so two
+     * frames are processed per iteration and the loop runs only
+     * NrFrames/2 times.
+     */
+    InLoop = (LVM_INT16)(NrFrames >> 1);
+    /* OutLoop is calculated to handle the case where NrFrames is odd. */
+    OutLoop = (LVM_INT16)(NrFrames - (InLoop << 1));
+
+    if (Current < Target) {
+        if (OutLoop) {
+            Temp = Current + Delta;
+            Current = Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (ii = OutLoop*NrChannels; ii != 0; ii--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+            Temp = Current + Delta;
+            Current = Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+            }
+        }
+    }
+    else{
+        if (OutLoop) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (ii = OutLoop*NrChannels; ii != 0; ii--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+            }
+        }
+    }
+    pInstance->Current = Current;
+}
+
+#endif
 #else
 void LVC_Core_MixInSoft_D16C31_SAT( LVMixer3_st *ptrInstance,
                                     const LVM_INT16     *src,
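
The InLoop/OutLoop split used by the Mc mixer cores above processes two frames per inner iteration; OutLoop absorbs the odd frame, so OutLoop + 2 * InLoop always equals NrFrames. A tiny arithmetic sketch of that split.

/* Sketch of the InLoop/OutLoop frame split used by the Mc mixer cores above. */
#include <stdio.h>

int main(void)
{
    int nr_frames;

    for (nr_frames = 5; nr_frames <= 8; nr_frames++) {
        int in_loop  = nr_frames >> 1;               /* pairs of frames      */
        int out_loop = nr_frames - (in_loop << 1);   /* 1 if NrFrames is odd */
        printf("NrFrames=%d -> InLoop=%d OutLoop=%d (covers %d frames)\n",
               nr_frames, in_loop, out_loop, out_loop + 2 * in_loop);
    }
    return 0;
}
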
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c
index b5e7f5c..5bfdad8 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c
@@ -27,10 +27,10 @@
    FUNCTION LVCore_MIXSOFT_1ST_D16C31_WRA
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance,
-                                    const LVM_FLOAT     *src,
-                                          LVM_FLOAT     *dst,
-                                          LVM_INT16     n)
+void LVC_Core_MixSoft_1St_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
+                                     const LVM_FLOAT   *src,
+                                           LVM_FLOAT   *dst,
+                                           LVM_INT16   n)
 {
     LVM_INT16   OutLoop;
     LVM_INT16   InLoop;
@@ -105,6 +105,119 @@
     }
     pInstance->Current=Current;
 }
+
+
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_Core_MixSoft_Mc_D16C31_WRA
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_Core_MixSoft_Mc_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT   *src,
+                                          LVM_FLOAT   *dst,
+                                          LVM_INT16   NrFrames,
+                                          LVM_INT16   NrChannels)
+{
+    LVM_INT16   OutLoop;
+    LVM_INT16   InLoop;
+    LVM_INT32   ii, jj;
+    Mix_Private_FLOAT_st  *pInstance = (Mix_Private_FLOAT_st *)(ptrInstance->PrivateParams);
+    LVM_FLOAT   Delta   = (LVM_FLOAT)pInstance->Delta;
+    LVM_FLOAT   Current = (LVM_FLOAT)pInstance->Current;
+    LVM_FLOAT   Target  = (LVM_FLOAT)pInstance->Target;
+    LVM_FLOAT   Temp;
+
+    /*
+     * The same operation is performed on consecutive frames, so two
+     * frames are processed per iteration and the loop runs only
+     * NrFrames/2 times.
+     */
+    InLoop = (LVM_INT16)(NrFrames >> 1);
+    /* OutLoop is calculated to handle the case where NrFrames is odd. */
+    OutLoop = (LVM_INT16)(NrFrames - (InLoop << 1));
+
+    if (Current<Target) {
+        if (OutLoop) {
+
+            Temp = Current + Delta;
+            if (Temp > 1.0f)
+                Temp = 1.0f;
+            else if (Temp < -1.0f)
+                Temp = -1.0f;
+
+            Current=Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (ii = OutLoop; ii != 0; ii--) {
+                for (jj = NrChannels; jj !=0; jj--) {
+                    *(dst++) = (((LVM_FLOAT)*(src++) * (LVM_FLOAT)Current));
+                }
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+
+            Temp = Current + Delta;
+
+            if (Temp > 1.0f)
+                Temp = 1.0f;
+            else if (Temp < -1.0f)
+                Temp = -1.0f;
+
+            Current=Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--)
+            {
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+            }
+        }
+    }
+    else{
+        if (OutLoop) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (ii = OutLoop; ii != 0; ii--) {
+                for (jj = NrChannels; jj !=0; jj--) {
+                    *(dst++) = (((LVM_FLOAT)*(src++) * (LVM_FLOAT)Current));
+                }
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--)
+            {
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+            }
+        }
+    }
+    pInstance->Current=Current;
+}
+#endif
+
 #else
 void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_st *ptrInstance,
                                     const LVM_INT16     *src,
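
The Mc wrapper above chooses hard mixing (a constant gain, with a shortcut when the target gain is exactly 1.0) whenever the current gain already equals the target, the ramp step Delta is a full-scale 1.0, or the remaining difference is smaller than Delta; otherwise it calls the soft-ramping core. A compact sketch of that decision; decide() is an illustrative helper, not a library call.

/* Sketch of the hard-vs-soft mixing decision used by the Mc wrappers above.
 * decide() is illustrative only. */
#include <stdio.h>
#include <math.h>

static void decide(float current, float target, float delta)
{
    if (current == target || delta == 1.0f || fabsf(current - target) < delta)
        printf("current=%.3f target=%.3f -> hard mixing (constant gain)\n",
               current, target);
    else
        printf("current=%.3f target=%.3f -> soft mixing (ramp by delta=%.3f)\n",
               current, target, delta);
}

int main(void)
{
    decide(0.500f, 0.500f, 0.010f);  /* already at target            */
    decide(0.495f, 0.500f, 0.010f);  /* close enough: snap to target */
    decide(0.200f, 0.500f, 0.010f);  /* far away: ramp toward target */
    return 0;
}
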
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c
index 192f126..65956f7 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c
@@ -34,10 +34,10 @@
    FUNCTION MIXINSOFT_D16C31_SAT
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_FLOAT_st *ptrInstance,
-                               LVM_FLOAT             *src,
-                               LVM_FLOAT             *dst,
-                               LVM_INT16             n)
+void LVC_MixInSoft_D16C31_SAT(LVMixer3_1St_FLOAT_st *ptrInstance,
+                              const LVM_FLOAT       *src,
+                                    LVM_FLOAT       *dst,
+                                    LVM_INT16       n)
 {
     char        HardMixing = TRUE;
     LVM_FLOAT   TargetGain;
@@ -106,6 +106,110 @@
     }
 
 }
+
+
+
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_MixInSoft_Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_MixInSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *ptrInstance,
+                                 const LVM_FLOAT       *src,
+                                       LVM_FLOAT       *dst,
+                                       LVM_INT16       NrFrames,
+                                       LVM_INT16       NrChannels)
+{
+    char        HardMixing = TRUE;
+    LVM_FLOAT   TargetGain;
+    Mix_Private_FLOAT_st  *pInstance = \
+                             (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+    if (pInstance->Current != pInstance->Target)
+    {
+        if (pInstance->Delta == 1.0f) {
+            pInstance->Current = pInstance->Target;
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else{
+            /* Soft mixing has to be applied */
+            HardMixing = FALSE;
+            LVC_Core_MixInSoft_Mc_D16C31_SAT(&(ptrInstance->MixerStream[0]),
+                                             src,
+                                             dst,
+                                             NrFrames,
+                                             NrChannels);
+        }
+    }
+
+    /******************************************************************************
+       HARD MIXING
+    *******************************************************************************/
+
+    if (HardMixing) {
+        if (pInstance->Target != 0) { /* Nothing to do in case Target = 0 */
+            if ((pInstance->Target) == 1.0f) {
+                Add2_Sat_Float(src, dst, NrFrames*NrChannels);
+            }
+            else{
+                Mac3s_Sat_Float(src,
+                                (pInstance->Target),
+                                dst,
+                                NrFrames * NrChannels);
+                /* In case the LVCore function would have changed the Current value */
+                pInstance->Current = pInstance->Target;
+            }
+        }
+    }
+
+
+    /******************************************************************************
+       CALL BACK
+    *******************************************************************************/
+
+    if (ptrInstance->MixerStream[0].CallbackSet) {
+        if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(ptrInstance->MixerStream, TargetGain);
+            ptrInstance->MixerStream[0].CallbackSet = FALSE;
+            if (ptrInstance->MixerStream[0].pCallBack != 0) {
+                (*ptrInstance->MixerStream[0].pCallBack) (\
+                                                ptrInstance->MixerStream[0].pCallbackHandle,
+                                                ptrInstance->MixerStream[0].pGeneralPurpose,
+                                                ptrInstance->MixerStream[0].CallbackParam);
+            }
+        }
+    }
+
+}
+#endif
+
+
 #else
 void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_st *ptrInstance,
                                     LVM_INT16             *src,
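
For reference, the multichannel in-place mix added above treats the buffers as interleaved, so both the soft and the hard paths run over NrFrames * NrChannels samples. A minimal standalone sketch of the hard-mixing step (a plain multiply-accumulate with saturation; illustrative only, the shipped Add2_Sat_Float/Mac3s_Sat_Float helpers may differ in detail):

#include <stddef.h>

/* Illustrative sketch: accumulate gain * src into dst over interleaved
 * multichannel data (frame layout L R C ... L R C ...), saturating to
 * [-1.0, 1.0] as the _SAT suffix suggests. */
static void mix_in_hard_sat(const float *src, float gain, float *dst,
                            size_t nrFrames, size_t nrChannels)
{
    const size_t n = nrFrames * nrChannels;
    for (size_t i = 0; i < n; i++) {
        float acc = dst[i] + gain * src[i];
        if (acc > 1.0f) {
            acc = 1.0f;
        } else if (acc < -1.0f) {
            acc = -1.0f;
        }
        dst[i] = acc;
    }
}
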
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c
index 1017de3..0678ae0 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c
@@ -103,6 +103,101 @@
         }
     }
 }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_MixSoft_Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_MixSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *ptrInstance,
+                                  const LVM_FLOAT      *src,
+                                        LVM_FLOAT      *dst,
+                                        LVM_INT16      NrFrames,
+                                        LVM_INT16      NrChannels)
+{
+    char        HardMixing = TRUE;
+    LVM_FLOAT   TargetGain;
+    Mix_Private_FLOAT_st  *pInstance = \
+                          (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+    if (pInstance->Current != pInstance->Target)
+    {
+        if (pInstance->Delta == 1.0f) {
+            pInstance->Current = pInstance->Target;
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else{
+            /* Soft mixing has to be applied */
+            HardMixing = FALSE;
+            LVC_Core_MixSoft_Mc_D16C31_WRA(&(ptrInstance->MixerStream[0]),
+                                           src,
+                                           dst,
+                                           NrFrames,
+                                           NrChannels);
+        }
+    }
+
+    /******************************************************************************
+       HARD MIXING
+    *******************************************************************************/
+
+    if (HardMixing) {
+        if (pInstance->Target == 0)
+            LoadConst_Float(0.0, dst, NrFrames * NrChannels);
+        else {
+            if ((pInstance->Target) != 1.0f)
+                Mult3s_Float(src, (pInstance->Target), dst, NrFrames * NrChannels);
+            else if (src != dst)
+                Copy_Float(src, dst, NrFrames * NrChannels);
+        }
+
+    }
+
+    /******************************************************************************
+       CALL BACK
+    *******************************************************************************/
+
+    if (ptrInstance->MixerStream[0].CallbackSet) {
+        if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(ptrInstance->MixerStream, TargetGain);
+            ptrInstance->MixerStream[0].CallbackSet = FALSE;
+            if (ptrInstance->MixerStream[0].pCallBack != 0) {
+                (*ptrInstance->MixerStream[0].pCallBack) (\
+                                                ptrInstance->MixerStream[0].pCallbackHandle,
+                                                ptrInstance->MixerStream[0].pGeneralPurpose,
+                                                ptrInstance->MixerStream[0].CallbackParam);
+            }
+        }
+    }
+}
+
+#endif
+
 #else
 void LVC_MixSoft_1St_D16C31_SAT( LVMixer3_1St_st *ptrInstance,
                                   const LVM_INT16             *src,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c
index 3c90071..8a89de1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c
@@ -26,11 +26,11 @@
    FUNCTION LVC_MixSoft_2St_D16C31_SAT.c
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_FLOAT_st *ptrInstance,
-                                 const   LVM_FLOAT       *src1,
-                                 LVM_FLOAT       *src2,
-                                 LVM_FLOAT       *dst,
-                                 LVM_INT16       n)
+void LVC_MixSoft_2St_D16C31_SAT(LVMixer3_2St_FLOAT_st *ptrInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                      LVM_FLOAT       *dst,
+                                      LVM_INT16       n)
 {
     Mix_Private_FLOAT_st  *pInstance1 = \
                              (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
@@ -67,6 +67,70 @@
                                          src1, src2, dst, n);
     }
 }
+
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_MixSoft_2Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  2 stream Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src1           First multichannel source
+ *  src2           Second multichannel source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_MixSoft_2Mc_D16C31_SAT(LVMixer3_2St_FLOAT_st *ptrInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                      LVM_FLOAT       *dst,
+                                      LVM_INT16       NrFrames,
+                                      LVM_INT16       NrChannels)
+{
+    Mix_Private_FLOAT_st  *pInstance1 = \
+                             (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+    Mix_Private_FLOAT_st  *pInstance2 = \
+                             (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[1].PrivateParams);
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+    if ((pInstance1->Current == pInstance1->Target) && (pInstance1->Current == 0)) {
+        LVC_MixSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[1]),
+                                    src2, dst, NrFrames, NrChannels);
+    }
+    else if ((pInstance2->Current == pInstance2->Target) && (pInstance2->Current == 0)) {
+        LVC_MixSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[0]),
+                                    src1, dst, NrFrames, NrChannels);
+    }
+    else if ((pInstance1->Current != pInstance1->Target) || \
+                                    (pInstance2->Current != pInstance2->Target))
+    {
+        LVC_MixSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[0]),
+                                   src1, dst, NrFrames, NrChannels);
+        LVC_MixInSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[1]),
+                                   src2, dst, NrFrames, NrChannels);
+    }
+    else{
+        /******************************************************************************
+           HARD MIXING
+        *******************************************************************************/
+        LVC_Core_MixHard_2St_D16C31_SAT(&ptrInstance->MixerStream[0],
+                                        &ptrInstance->MixerStream[1],
+                                        src1, src2, dst, NrFrames * NrChannels);
+    }
+}
+#endif
+
 #else
 void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_st *ptrInstance,
                                     const   LVM_INT16       *src1,
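
The two-stream variant above is what the effect modules use for bypass cross-fading: stream 0 ramps the wet (processed) gain while stream 1 ramps the dry gain, as LVEQNB_Process does further down in this patch. A hypothetical call site (the instance and buffer names here are illustrative, not taken from the patch):

/* Hypothetical bypass cross-fade. dst may alias src1 (the wet buffer) but
 * must not alias src2, per the "dst cannot be equal to src2" note in
 * LVC_Mixer.h. */
LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->BypassMixer,
                           pWet,                  /* src1: processed samples */
                           pDry,                  /* src2: original samples  */
                           pWet,                  /* dst: overwrite wet      */
                           (LVM_INT16)nrFrames,
                           (LVM_INT16)nrChannels);
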
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
index f904915..7f18747 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
@@ -157,10 +157,18 @@
 /*** 16 bit functions *************************************************************/
 
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_1St_D16C31_SAT( LVMixer3_1St_FLOAT_st *pInstance,
-                                 const LVM_FLOAT           *src,
-                                 LVM_FLOAT           *dst,
-                                 LVM_INT16           n);
+void LVC_MixSoft_1St_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                                const LVM_FLOAT       *src,
+                                      LVM_FLOAT       *dst,
+                                      LVM_INT16       n);
+#ifdef SUPPORT_MC
+void LVC_MixSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                               const LVM_FLOAT       *src,
+                                     LVM_FLOAT       *dst,
+                                     LVM_INT16       NrFrames,
+                                     LVM_INT16       NrChannels);
+#endif
+
 #else
 void LVC_MixSoft_1St_D16C31_SAT( LVMixer3_1St_st *pInstance,
                                   const LVM_INT16           *src,
@@ -169,10 +177,18 @@
 #endif
 
 #ifdef BUILD_FLOAT
-void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_FLOAT_st *pInstance,
-                               LVM_FLOAT           *src,
-                               LVM_FLOAT           *dst,
-                               LVM_INT16           n);
+void LVC_MixInSoft_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                              const LVM_FLOAT       *src,
+                                    LVM_FLOAT       *dst,
+                                    LVM_INT16       n);
+#ifdef SUPPORT_MC
+void LVC_MixInSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                                 const LVM_FLOAT       *src,
+                                       LVM_FLOAT       *dst,
+                                       LVM_INT16       NrFrames,
+                                       LVM_INT16       NrChannels);
+#endif
+
 #else
 void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_st *pInstance,
                                         LVM_INT16           *src,
@@ -181,11 +197,19 @@
 #endif
 
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_FLOAT_st *pInstance,
-                                 const LVM_FLOAT             *src1,
-                                 LVM_FLOAT             *src2,
-                                 LVM_FLOAT             *dst,  /* dst cannot be equal to src2 */
-                                 LVM_INT16             n);
+void LVC_MixSoft_2St_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                LVM_FLOAT             *dst,  /* dst cannot be equal to src2 */
+                                LVM_INT16             n);
+#ifdef SUPPORT_MC
+void LVC_MixSoft_2Mc_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                LVM_FLOAT             *dst,  /* dst cannot be equal to src2 */
+                                LVM_INT16             NrFrames,
+                                LVM_INT16             NrChannels);
+#endif
 #else
 void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_st *pInstance,
                                 const LVM_INT16             *src1,
@@ -200,10 +224,10 @@
 /* Gain values should not be more that 1.0                                        */
 /**********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_FLOAT_st         *pInstance,
-                                    const   LVM_FLOAT           *src,
-                                    LVM_FLOAT           *dst,   /* dst can be equal to src */
-                                    LVM_INT16           n);     /* Number of stereo samples */
+void LVC_MixSoft_1St_2i_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                   const   LVM_FLOAT     *src,
+                                   LVM_FLOAT             *dst,   /* dst can be equal to src */
+                                   LVM_INT16             n);     /* Number of stereo samples */
 #else
 void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_st         *pInstance,
                                 const   LVM_INT16           *src,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c
index c67455a..507eefa 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c
@@ -35,7 +35,7 @@
 {
     LVM_FLOAT       TargetGain;
     Mix_Private_FLOAT_st  *pInstance = (Mix_Private_FLOAT_st *)pStream->PrivateParams;
-    
+
     TargetGain = pInstance->Target;  // TargetGain
     return TargetGain;
 }
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
index d0d0e1f..f10094b 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
@@ -62,6 +62,13 @@
                                     const LVM_FLOAT     *src,
                                     LVM_FLOAT     *dst,
                                     LVM_INT16     n);
+#ifdef SUPPORT_MC
+void LVC_Core_MixInSoft_Mc_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT     *src,
+                                          LVM_FLOAT     *dst,
+                                          LVM_INT16     NrFrames,
+                                          LVM_INT16     NrChannels);
+#endif
 #else
 void LVC_Core_MixInSoft_D16C31_SAT( LVMixer3_st *pInstance,
                                     const LVM_INT16     *src,
@@ -73,6 +80,13 @@
                                       const LVM_FLOAT     *src,
                                       LVM_FLOAT     *dst,
                                       LVM_INT16     n);
+#ifdef SUPPORT_MC
+void LVC_Core_MixSoft_Mc_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT     *src,
+                                          LVM_FLOAT     *dst,
+                                          LVM_INT16     NrFrames,
+                                          LVM_INT16     NrChannels);
+#endif
 #else
 void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_st *pInstance,
                                     const LVM_INT16     *src,
diff --git a/media/libeffects/lvm/lib/Common/src/Mac3s_Sat_32x16.c b/media/libeffects/lvm/lib/Common/src/Mac3s_Sat_32x16.c
index e3fb40d..17fd833 100644
--- a/media/libeffects/lvm/lib/Common/src/Mac3s_Sat_32x16.c
+++ b/media/libeffects/lvm/lib/Common/src/Mac3s_Sat_32x16.c
@@ -50,11 +50,11 @@
         {
             if(temp<0)
             {
-                dOutVal=0x80000000l;
+                dOutVal=0x80000000L;
             }
             else
             {
-                dOutVal=0x7FFFFFFFl;
+                dOutVal=0x7FFFFFFFL;
             }
         }
 
diff --git a/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c
index 9c17a05..6c8b2db 100644
--- a/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c
@@ -119,6 +119,80 @@
         }
 
     }
+
+#ifdef SUPPORT_MC
+/**************************************************************************
+DELAYS-
+pBiquadState->pDelays[0] to
+pBiquadState->pDelays[NrChannels - 1] is x(n-1) for all NrChannels
+
+pBiquadState->pDelays[NrChannels] to
+pBiquadState->pDelays[2*NrChannels - 1] is x(n-2) for all NrChannels
+
+pBiquadState->pDelays[2*NrChannels] to
+pBiquadState->pDelays[3*NrChannels - 1] is y(n-1) for all NrChannels
+
+pBiquadState->pDelays[3*NrChannels] to
+pBiquadState->pDelays[4*NrChannels - 1] is y(n-2) for all NrChannels
+***************************************************************************/
+
+void PK_Mc_D32F32C14G11_TRC_WRA_01 (Biquad_FLOAT_Instance_t       *pInstance,
+                                    LVM_FLOAT               *pDataIn,
+                                    LVM_FLOAT               *pDataOut,
+                                    LVM_INT16               NrFrames,
+                                    LVM_INT16               NrChannels)
+    {
+        LVM_FLOAT yn, ynO, temp;
+        LVM_INT16 ii, jj;
+        PFilter_State_Float pBiquadState = (PFilter_State_Float) pInstance;
+
+         for (ii = NrFrames; ii != 0; ii--)
+         {
+
+            for (jj = 0; jj < NrChannels; jj++)
+            {
+                /**************************************************************************
+                                PROCESSING OF THE jj CHANNEL
+                ***************************************************************************/
+                /* yn= (A0  * (x(n) - x(n-2)))*/
+                temp = (*pDataIn) - pBiquadState->pDelays[NrChannels + jj];
+                yn = temp * pBiquadState->coefs[0];
+
+                /* yn+= ((-B2  * y(n-2))) */
+                temp = pBiquadState->pDelays[NrChannels*3 + jj] * pBiquadState->coefs[1];
+                yn += temp;
+
+                /* yn+= ((-B1 * y(n-1))) */
+                temp = pBiquadState->pDelays[NrChannels*2 + jj] * pBiquadState->coefs[2];
+                yn += temp;
+
+                /* ynO= ((Gain * yn)) */
+                ynO = yn * pBiquadState->coefs[3];
+
+                /* ynO=(ynO + x(n))*/
+                ynO += (*pDataIn);
+
+                /**************************************************************************
+                                UPDATING THE DELAYS
+                ***************************************************************************/
+                pBiquadState->pDelays[NrChannels * 3 + jj] =
+                    pBiquadState->pDelays[NrChannels * 2 + jj]; /* y(n-2)=y(n-1)*/
+                pBiquadState->pDelays[NrChannels * 1 + jj] =
+                    pBiquadState->pDelays[jj]; /* x(n-2)=x(n-1)*/
+                pBiquadState->pDelays[NrChannels * 2 + jj] = yn; /* Update y(n-1) */
+                pBiquadState->pDelays[jj] = (*pDataIn); /* Update x(n-1)*/
+                pDataIn++;
+
+                /**************************************************************************
+                                WRITING THE OUTPUT
+                ***************************************************************************/
+                *pDataOut = ynO; /* Write output*/
+                pDataOut++;
+            }
+        }
+
+    }
+#endif
 #else
 void PK_2I_D32F32C14G11_TRC_WRA_01 ( Biquad_Instance_t       *pInstance,
                                      LVM_INT32               *pDataIn,
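
The DELAYS comment above defines a flat history array with four terms per channel. A small sketch of the index arithmetic it implies (hypothetical helpers that mirror the comment, not the shipped code):

/* Flat biquad history for N interleaved channels, per the DELAYS layout:
 *   [0    ..  N-1]  x(n-1)
 *   [N    .. 2N-1]  x(n-2)
 *   [2N   .. 3N-1]  y(n-1)
 *   [3N   .. 4N-1]  y(n-2)
 */
#define DELAY_X1(N, ch)  (          (ch))   /* x(n-1) of channel ch */
#define DELAY_X2(N, ch)  (    (N) + (ch))   /* x(n-2) of channel ch */
#define DELAY_Y1(N, ch)  (2 * (N) + (ch))   /* y(n-1) of channel ch */
#define DELAY_Y2(N, ch)  (3 * (N) + (ch))   /* y(n-2) of channel ch */

/* After each frame the loop above shifts the history exactly like this:
 *   pDelays[DELAY_Y2(N, ch)] = pDelays[DELAY_Y1(N, ch)];
 *   pDelays[DELAY_X2(N, ch)] = pDelays[DELAY_X1(N, ch)];
 *   pDelays[DELAY_Y1(N, ch)] = yn;
 *   pDelays[DELAY_X1(N, ch)] = xn;   (the current input sample)
 */
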
diff --git a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
index 8e0b738..e7fdbf6 100644
--- a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
+++ b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
@@ -267,7 +267,9 @@
     /* Equaliser parameters */
     LVM_UINT16                  NBands;                 /* Number of bands */
     LVEQNB_BandDef_t            *pBandDefinition;       /* Pointer to equaliser definitions */
-
+#ifdef SUPPORT_MC
+    LVM_INT16                   NrChannels;
+#endif
 } LVEQNB_Params_t;
 
 
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
index 56b02e0..a9cd5fd 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
@@ -46,7 +46,12 @@
 #define LVEQNB_INSTANCE_ALIGN       4                   /* 32-bit alignment for instance structures */
 #define LVEQNB_DATA_ALIGN           4                   /* 32-bit alignment for structures */
 #define LVEQNB_COEF_ALIGN           4                   /* 32-bit alignment for long words */
+#ifdef SUPPORT_MC
+/* Number of buffers required for inplace processing */
+#define LVEQNB_SCRATCHBUFFERS       (LVM_MAX_CHANNELS * 2)
+#else
 #define LVEQNB_SCRATCHBUFFERS       4                   /* Number of buffers required for inplace processing */
+#endif
 #define LVEQNB_SCRATCH_ALIGN        4                   /* 32-bit alignment for long data */
 
 #define LVEQNB_BYPASS_MIXER_TC      100                 /* Bypass Mixer TC */
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c
index 6328181..d188c0e 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c
@@ -26,6 +26,7 @@
 #include "VectorArithmetic.h"
 #include "BIQUAD.h"
 
+#include <log/log.h>
 
 /****************************************************************************************/
 /*                                                                                      */
@@ -61,14 +62,18 @@
 LVEQNB_ReturnStatus_en LVEQNB_Process(LVEQNB_Handle_t       hInstance,
                                       const LVM_FLOAT       *pInData,
                                       LVM_FLOAT             *pOutData,
-                                      LVM_UINT16            NumSamples)
-{
-
-    LVM_UINT16          i;
-    Biquad_FLOAT_Instance_t   *pBiquad;
+                                      const LVM_UINT16      NrFrames)
+{                                     // updated to use samples = frames * channels.
     LVEQNB_Instance_t   *pInstance = (LVEQNB_Instance_t  *)hInstance;
-    LVM_FLOAT           *pScratch;
 
+#ifdef SUPPORT_MC
+    // Mono passed in as stereo
+    const LVM_INT32 NrChannels = pInstance->Params.NrChannels == 1
+        ? 2 : pInstance->Params.NrChannels;
+#else
+    const LVM_INT32 NrChannels = 2; // FCC_2
+#endif
+    const LVM_INT32 NrSamples = NrChannels * NrFrames;
 
      /* Check for NULL pointers */
     if((hInstance == LVM_NULL) || (pInData == LVM_NULL) || (pOutData == LVM_NULL))
@@ -82,14 +87,14 @@
         return LVEQNB_ALIGNMENTERROR;
     }
 
-    pScratch  = (LVM_FLOAT *)pInstance->pFastTemporary;
+    LVM_FLOAT * const pScratch = (LVM_FLOAT *)pInstance->pFastTemporary;
 
     /*
-    * Check the number of samples is not too large
+    * Check the number of frames is not too large
     */
-    if (NumSamples > pInstance->Capabilities.MaxBlockSize)
+    if (NrFrames > pInstance->Capabilities.MaxBlockSize)
     {
-        return(LVEQNB_TOOMANYSAMPLES);
+        return LVEQNB_TOOMANYSAMPLES;
     }
 
     if (pInstance->Params.OperatingMode == LVEQNB_ON)
@@ -97,16 +102,16 @@
         /*
          * Copy input data in to scratch buffer
          */
+        Copy_Float(pInData,     /* Source */
+                   pScratch,    /* Destination */
+                   (LVM_INT16)NrSamples);
 
-        Copy_Float((LVM_FLOAT *)pInData,      /* Source */
-                   pScratch,                  /* Destination */
-                   (LVM_INT16)(2 * NumSamples)); /* Left and Right */
         /*
          * For each section execte the filter unless the gain is 0dB
          */
         if (pInstance->NBands != 0)
         {
-            for (i = 0; i < pInstance->NBands; i++)
+            for (LVM_UINT16 i = 0; i < pInstance->NBands; i++)
             {
                 /*
                  * Check if band is non-zero dB gain
@@ -116,7 +121,7 @@
                     /*
                      * Get the address of the biquad instance
                      */
-                    pBiquad = &pInstance->pEQNB_FilterState_Float[i];
+                    Biquad_FLOAT_Instance_t *pBiquad = &pInstance->pEQNB_FilterState_Float[i];
 
 
                     /*
@@ -126,10 +131,18 @@
                     {
                         case LVEQNB_SinglePrecision_Float:
                         {
+#ifdef SUPPORT_MC
+                            PK_Mc_D32F32C14G11_TRC_WRA_01(pBiquad,
+                                                          pScratch,
+                                                          pScratch,
+                                                          (LVM_INT16)NrFrames,
+                                                          (LVM_INT16)NrChannels);
+#else
                             PK_2I_D32F32C14G11_TRC_WRA_01(pBiquad,
-                                                          (LVM_FLOAT *)pScratch,
-                                                          (LVM_FLOAT *)pScratch,
-                                                          (LVM_INT16)NumSamples);
+                                                          pScratch,
+                                                          pScratch,
+                                                          (LVM_INT16)NrFrames);
+#endif
                             break;
                         }
                         default:
@@ -141,19 +154,29 @@
 
 
         if(pInstance->bInOperatingModeTransition == LVM_TRUE){
+#ifdef SUPPORT_MC
+            LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->BypassMixer,
+                                       pScratch,
+                                       pInData,
+                                       pScratch,
+                                       (LVM_INT16)NrFrames,
+                                       (LVM_INT16)NrChannels);
+#else
             LVC_MixSoft_2St_D16C31_SAT(&pInstance->BypassMixer,
-                                       (LVM_FLOAT *)pScratch,
-                                       (LVM_FLOAT *)pInData,
-                                       (LVM_FLOAT *)pScratch,
-                                       (LVM_INT16)(2 * NumSamples));
-            Copy_Float((LVM_FLOAT*)pScratch,                           /* Source */
-                       pOutData,                                       /* Destination */
-                       (LVM_INT16)(2 * NumSamples));                     /* Left and Right samples */
+                                       pScratch,
+                                       pInData,
+                                       pScratch,
+                                       (LVM_INT16)NrSamples);
+#endif
+            // same Copy_Float as in the else clause below
+            Copy_Float(pScratch,                         /* Source */
+                       pOutData,                         /* Destination */
+                       (LVM_INT16)NrSamples);            /* All channel samples */
         }
         else{
             Copy_Float(pScratch,              /* Source */
                        pOutData,              /* Destination */
-                       (LVM_INT16 )(2 * NumSamples)); /* Left and Right */
+                       (LVM_INT16)NrSamples); /* All channel samples */
         }
     }
     else
@@ -163,12 +186,12 @@
          */
         if (pInData != pOutData)
         {
-            Copy_Float(pInData,                                    /* Source */
-                       pOutData,                                   /* Destination */
-                       (LVM_INT16)(2 * NumSamples));                 /* Left and Right samples */
+            Copy_Float(pInData,                          /* Source */
+                       pOutData,                         /* Destination */
+                       (LVM_INT16)NrSamples);            /* All channel samples */
         }
     }
-    return(LVEQNB_SUCCESS);
+    return LVEQNB_SUCCESS;
 
 }
 #else
@@ -312,4 +335,4 @@
     return(LVEQNB_SUCCESS);
 
 }
-#endif
\ No newline at end of file
+#endif
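
LVEQNB_Process now takes a frame count and derives the interleaved sample count itself, with mono promoted to two channels because mono input has already been duplicated into a stereo pair upstream. A standalone sketch of that accounting (not the shipped code):

/* Sketch of the frames-to-samples accounting used by the multichannel path:
 * buffers are interleaved, so every Copy_Float/filter call operates on
 * NrChannels * NrFrames float samples. */
static int eqnb_sample_count(int nrChannels, int nrFrames)
{
    if (nrChannels == 1) {
        nrChannels = 2;   /* mono is passed in as a stereo pair */
    }
    return nrChannels * nrFrames;
}
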
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c b/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c
index 566d84f..1d1283e 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c
@@ -579,7 +579,7 @@
                                pTemp,
                                pTemp,
                                (LVM_INT16)NumSamples);
-    
+
     /*
      *  Process all delay lines
      */
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
index a719053..8c7807f 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
@@ -64,12 +64,21 @@
         (
         (pNewParams->SampleRate != LVM_FS_8000) && (pNewParams->SampleRate != LVM_FS_11025) && (pNewParams->SampleRate != LVM_FS_12000)       &&
         (pNewParams->SampleRate != LVM_FS_16000) && (pNewParams->SampleRate != LVM_FS_22050) && (pNewParams->SampleRate != LVM_FS_24000)       &&
-        (pNewParams->SampleRate != LVM_FS_32000) && (pNewParams->SampleRate != LVM_FS_44100) && (pNewParams->SampleRate != LVM_FS_48000)      
+        (pNewParams->SampleRate != LVM_FS_32000) &&
+        (pNewParams->SampleRate != LVM_FS_44100) &&
+        (pNewParams->SampleRate != LVM_FS_48000)
 #ifdef HIGHER_FS
         && (pNewParams->SampleRate != LVM_FS_96000) && (pNewParams->SampleRate != LVM_FS_192000)
 #endif
         )
+#ifdef SUPPORT_MC
+        || ((pNewParams->SourceFormat != LVM_STEREO)       &&
+            (pNewParams->SourceFormat != LVM_MONOINSTEREO) &&
+            (pNewParams->SourceFormat != LVM_MONO)         &&
+            (pNewParams->SourceFormat != LVM_MULTICHANNEL)))
+#else
         || ((pNewParams->SourceFormat != LVM_STEREO) && (pNewParams->SourceFormat != LVM_MONOINSTEREO) && (pNewParams->SourceFormat != LVM_MONO)) )
+#endif
     {
         return (LVREV_OUTOFRANGE);
     }
diff --git a/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h b/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
index e75695e..e507a7c 100644
--- a/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
+++ b/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
@@ -205,6 +205,9 @@
     LVM_Fs_en               SampleRate;             /* Sampling rate */
     LVM_INT16               EffectLevel;            /* Effect level */
     LVM_UINT16              ReverbLevel;            /* Reverb level in % */
+#ifdef SUPPORT_MC
+    LVM_INT32               NrChannels;
+#endif
 } LVCS_Params_t;
 
 
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
index a97e4f0..ab8ccd1 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
@@ -61,6 +61,15 @@
 
 /* Memory */
 #define LVCS_SCRATCHBUFFERS              6      /* Number of buffers required for inplace processing */
+#ifdef SUPPORT_MC
+/*
+ * The Concert Surround module applies processing only to the first two
+ * channels of a multichannel input. The data of the first two channels is
+ * copied from the multichannel input into a scratch buffer; the buffers
+ * added here are used for that purpose.
+ */
+#define LVCS_MC_SCRATCHBUFFERS           2
+#endif
 
 /* General */
 #define LVCS_INVALID                0xFFFF      /* Invalid init parameter */
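
The two extra scratch regions above hold a packed stereo (L, R) image taken from the interleaved multichannel input before Concert Surround processing, which is why 2 * NrFrames samples are reserved per buffer. A hypothetical extraction loop showing the intent (the actual copy is performed by Copy_Float_Mc_Stereo, whose implementation may differ):

#include <stddef.h>

/* Illustrative sketch: copy channels 0 and 1 of an interleaved multichannel
 * buffer into a packed stereo buffer of 2 * nrFrames samples. */
static void extract_stereo_pair(const float *mcIn, float *stereoOut,
                                size_t nrFrames, size_t nrChannels)
{
    for (size_t f = 0; f < nrFrames; f++) {
        stereoOut[2 * f]     = mcIn[f * nrChannels];      /* Left  */
        stereoOut[2 * f + 1] = mcIn[f * nrChannels + 1];  /* Right */
    }
}
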
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
index 3956d4d..ef1d9eb 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
@@ -76,6 +76,22 @@
     LVCS_Instance_t     *pInstance = (LVCS_Instance_t  *)hInstance;
     LVM_FLOAT           *pScratch;
     LVCS_ReturnStatus_en err;
+#ifdef SUPPORT_MC
+    LVM_FLOAT           *pStIn;
+    LVM_INT32           channels = pInstance->Params.NrChannels;
+#define NrFrames NumSamples  // alias for clarity
+
+    /* In case of mono processing, a stereo input is created from the mono
+     * data and stored in pInData before any of the effects are applied.
+     * However, pInstance->Params.NrChannels is not updated at this point,
+     * so channels is set to 2 here to treat pInData as stereo.
+     */
+    if (channels == 1)
+    {
+        channels = 2;
+    }
+#endif
 
     pScratch  = (LVM_FLOAT *) \
                   pInstance->MemoryTable.Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress;
@@ -83,6 +99,25 @@
     /*
      * Check if the processing is inplace
      */
+#ifdef SUPPORT_MC
+    /*
+     * The pInput buffer holds the data of the first two (Left, Right) channels.
+     * Hence the memory required by this buffer is 2 * NrFrames samples.
+     * The Concert Surround module carries out processing only on L, R.
+     */
+    pInput = pScratch + (2 * NrFrames);
+    pStIn  = pScratch + (LVCS_SCRATCHBUFFERS * NrFrames);
+    /* The data of the first two channels is extracted from the input and
+     * copied into the pInput buffer
+     */
+    Copy_Float_Mc_Stereo((LVM_FLOAT *)pInData,
+                         (LVM_FLOAT *)pInput,
+                         NrFrames,
+                         channels);
+    Copy_Float((LVM_FLOAT *)pInput,
+               (LVM_FLOAT *)pStIn,
+               (LVM_INT16)(2 * NrFrames));
+#else
     if (pInData == pOutData)
     {
         /* Processing inplace */
@@ -96,14 +131,21 @@
         /* Processing outplace */
         pInput = pInData;
     }
-
+#endif
     /*
      * Call the stereo enhancer
      */
+#ifdef SUPPORT_MC
+    err = LVCS_StereoEnhancer(hInstance,              /* Instance handle */
+                              pStIn,                  /* Pointer to the input data */
+                              pOutData,               /* Pointer to the output data */
+                              NrFrames);              /* Number of frames to process */
+#else
     err = LVCS_StereoEnhancer(hInstance,              /* Instance handle */
                               pInData,                    /* Pointer to the input data */
                               pOutData,                   /* Pointer to the output data */
                               NumSamples);                /* Number of samples to process */
+#endif
 
     /*
      * Call the reverb generator
@@ -112,7 +154,7 @@
                                pOutData,                  /* Pointer to the input data */
                                pOutData,                  /* Pointer to the output data */
                                NumSamples);               /* Number of samples to process */
-    
+
     /*
      * Call the equaliser
      */
@@ -239,7 +281,15 @@
 
     LVCS_Instance_t *pInstance = (LVCS_Instance_t  *)hInstance;
     LVCS_ReturnStatus_en err;
-
+#ifdef SUPPORT_MC
+    /* Extract the number of channels */
+    LVM_INT32 channels = pInstance->Params.NrChannels;
+#define NrFrames NumSamples  // alias for clarity
+    if (channels == 1)
+    {
+        channels = 2;
+    }
+#endif
     /*
      * Check the number of samples is not too large
      */
@@ -260,8 +310,8 @@
                                   pInData,
                                   pOutData,
                                   NumSamples);
-            
-            
+
+
         /*
          * Compress to reduce expansion effect of Concert Sound and correct volume
          * differences for difference settings. Not applied in test modes
@@ -376,17 +426,32 @@
                             (LVM_INT16)NumSamples);
             }
         }
+#ifdef SUPPORT_MC
+        Copy_Float_Stereo_Mc(pInData,
+                             pOutData,
+                             NrFrames,
+                             channels);
+#endif
     }
     else
     {
         if (pInData != pOutData)
         {
+#ifdef SUPPORT_MC
+            /*
+             * The algorithm is disabled so just copy the data
+             */
+            Copy_Float((LVM_FLOAT *)pInData,               /* Source */
+                       (LVM_FLOAT *)pOutData,                  /* Destination */
+                       (LVM_INT16)(channels * NrFrames));    /* All Channels*/
+#else
             /*
              * The algorithm is disabled so just copy the data
              */
             Copy_Float((LVM_FLOAT *)pInData,               /* Source */
                        (LVM_FLOAT *)pOutData,                  /* Destination */
                        (LVM_INT16)(2 * NumSamples));             /* Left and right */
+#endif
         }
     }
 
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
index e154e29..0765764 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
@@ -515,16 +515,16 @@
 
 #if defined(BUILD_FLOAT) && defined(HIGHER_FS)
 const LVM_INT16 LVCS_VolumeTCTable[11] = {LVCS_VOL_TC_Fs8000,
-		                                  LVCS_VOL_TC_Fs11025,
-										  LVCS_VOL_TC_Fs12000,
-										  LVCS_VOL_TC_Fs16000,
-										  LVCS_VOL_TC_Fs22050,
-										  LVCS_VOL_TC_Fs24000,
-										  LVCS_VOL_TC_Fs32000,
-										  LVCS_VOL_TC_Fs44100,
-										  LVCS_VOL_TC_Fs48000,
-										  LVCS_VOL_TC_Fs96000,
-										  LVCS_VOL_TC_Fs192000
+                                          LVCS_VOL_TC_Fs11025,
+                                          LVCS_VOL_TC_Fs12000,
+                                          LVCS_VOL_TC_Fs16000,
+                                          LVCS_VOL_TC_Fs22050,
+                                          LVCS_VOL_TC_Fs24000,
+                                          LVCS_VOL_TC_Fs32000,
+                                          LVCS_VOL_TC_Fs44100,
+                                          LVCS_VOL_TC_Fs48000,
+                                          LVCS_VOL_TC_Fs96000,
+                                          LVCS_VOL_TC_Fs192000
 };
 #else
 const LVM_INT16 LVCS_VolumeTCTable[9] = {LVCS_VOL_TC_Fs8000,
@@ -546,16 +546,16 @@
 /************************************************************************************/
 #if defined(BUILD_FLOAT) && defined(HIGHER_FS)
 const LVM_INT32   LVCS_SampleRateTable[11] = {8000,
-		                                      11025,
-											  12000,
-											  16000,
-											  22050,
-											  24000,
-											  32000,
-											  44100,
-											  48000,
-											  96000,
-											  192000
+                                              11025,
+                                              12000,
+                                              16000,
+                                              22050,
+                                              24000,
+                                              32000,
+                                              44100,
+                                              48000,
+                                              96000,
+                                              192000
 };
 #else
 const LVM_INT16   LVCS_SampleRateTable[9] = {8000,
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
new file mode 100644
index 0000000..8ee807c
--- /dev/null
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -0,0 +1,46 @@
+// Build the unit tests for effects
+
+cc_test {
+    name: "lvmtest",
+    host_supported: false,
+    proprietary: true,
+
+    include_dirs: [
+        "frameworks/av/media/libeffects/lvm/lib/Bass/lib",
+        "frameworks/av/media/libeffects/lvm/lib/Bass/src",
+        "frameworks/av/media/libeffects/lvm/lib/Bundle/src",
+        "frameworks/av/media/libeffects/lvm/lib/Common/src",
+        "frameworks/av/media/libeffects/lvm/lib/Eq/lib",
+        "frameworks/av/media/libeffects/lvm/lib/Eq/src",
+        "frameworks/av/media/libeffects/lvm/lib/SpectrumAnalyzer/lib",
+        "frameworks/av/media/libeffects/lvm/lib/SpectrumAnalyzer/src",
+        "frameworks/av/media/libeffects/lvm/lib/StereoWidening/lib",
+        "frameworks/av/media/libeffects/lvm/lib/StereoWidening/src",
+        "frameworks/av/media/libeffects/lvm/wrapper/Bundle",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+
+    shared_libs: [
+        "libaudioutils",
+        "liblog",
+    ],
+
+    static_libs: [
+        "libmusicbundle",
+    ],
+
+    srcs: ["lvmtest.cpp"],
+
+    cflags: [
+        "-DBUILD_FLOAT",
+        "-DHIGHER_FS",
+        "-DSUPPORT_MC",
+
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..340469a
--- /dev/null
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Run tests in this directory.
+#
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+mm -j
+
+echo "waiting for device"
+
+adb root && adb wait-for-device remount
+
+# location of test files
+testdir="/data/local/tmp/lvmTest"
+
+#flags="-bE -tE -eqE -csE"
+flags="-csE -tE -eqE"
+
+
+echo "========================================"
+echo "testing lvm"
+adb shell mkdir $testdir
+adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw $testdir
+adb push $OUT/testcases/lvmtest/arm64/lvmtest $testdir
+
+# run multichannel effects at different channel counts, saving only the stereo channel pair.
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_1.raw\
+                          -ch:1 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_2.raw\
+                           -ch:2 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_4.raw\
+                           -ch:4 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_6.raw\
+                           -ch:6 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_8.raw\
+                           -ch:8 -fs:44100 $flags
+
+# two channel files should be identical to higher channel computation (first 2 channels).
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_2.raw
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_4.raw
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_6.raw
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_8.raw
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
new file mode 100644
index 0000000..01c5955
--- /dev/null
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -0,0 +1,682 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <assert.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
+#include <log/log.h>
+
+#include "EffectBundle.h"
+#include "LVM_Private.h"
+
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) \
+  do {               \
+  } while (false)
+#endif
+
+#define CHECK_ARG(cond)                                \
+  {                                                    \
+    if (!(cond)) {                                     \
+      ALOGE("\tLVM_ERROR : Invalid argument: " #cond); \
+      return -EINVAL;                                  \
+    }                                                  \
+  \
+}
+
+#define LVM_ERROR_CHECK(LvmStatus, callingFunc, calledFunc)     \
+  {                                                             \
+    if ((LvmStatus) == LVM_NULLADDRESS) {                       \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "null pointer returned by %s in %s\n\n\n\n",          \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+    if ((LvmStatus) == LVM_ALIGNMENTERROR) {                    \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "bad alignment returned by %s in %s\n\n\n\n",         \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+    if ((LvmStatus) == LVM_INVALIDNUMSAMPLES) {                 \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "bad number of samples returned by %s in %s\n\n\n\n", \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+    if ((LvmStatus) == LVM_OUTOFRANGE) {                        \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "out of range returned by %s in %s\n",                \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+  }
+
+struct lvmConfigParams_t {
+  int              samplingFreq    = 44100;
+  int              nrChannels      = 2;
+  int              fChannels       = 2;
+  int              bassEffectLevel = 0;
+  int              eqPresetLevel   = 0;
+  int              frameLength     = 256;
+  LVM_BE_Mode_en   bassEnable      = LVM_BE_OFF;
+  LVM_TE_Mode_en   trebleEnable    = LVM_TE_OFF;
+  LVM_EQNB_Mode_en eqEnable        = LVM_EQNB_OFF;
+  LVM_Mode_en      csEnable        = LVM_MODE_OFF;
+};
+
+void printUsage() {
+  printf("\nUsage: ");
+  printf("\n     <exceutable> -i:<input_file> -o:<out_file> [options]\n");
+  printf("\nwhere, \n     <inputfile>  is the input file name");
+  printf("\n                  on which LVM effects are applied");
+  printf("\n     <outputfile> processed output file");
+  printf("\n     and options are mentioned below");
+  printf("\n");
+  printf("\n     -help (or) -h");
+  printf("\n           Prints this usage information");
+  printf("\n");
+  printf("\n     -ch:<process_channels> (1 through 8)\n\n");
+  printf("\n     -fch:<file_channels> (1 through 8)\n\n");
+  printf("\n     -basslvl:<effect_level>");
+  printf("\n           A value that ranges between 0 - 15 default 0");
+  printf("\n");
+  printf("\n     -eqPreset:<preset Value>");
+  printf("\n           0 - Normal");
+  printf("\n           1 - Classical");
+  printf("\n           2 - Dance");
+  printf("\n           3 - Flat");
+  printf("\n           4 - Folk");
+  printf("\n           5 - Heavy Metal");
+  printf("\n           6 - Hip Hop");
+  printf("\n           7 - Jazz");
+  printf("\n           8 - Pop");
+  printf("\n           9 - Rock");
+  printf("\n           default 0");
+  printf("\n     -bE ");
+  printf("\n           Enable Dynamic Bass Enhancement");
+  printf("\n");
+  printf("\n     -tE ");
+  printf("\n           Enable Treble Boost");
+  printf("\n");
+  printf("\n     -csE ");
+  printf("\n           Enable Concert Surround");
+  printf("\n");
+  printf("\n     -eqE ");
+  printf("\n           Enable Equalizer");
+}
+
+//----------------------------------------------------------------------------
+// LvmEffect_free()
+//----------------------------------------------------------------------------
+// Purpose: Free all memory associated with the Bundle.
+//
+// Inputs:
+//  pContext:   effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void LvmEffect_free(struct EffectContext *pContext) {
+  LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
+  LVM_MemTab_t MemTab;
+
+  /* Free the algorithm memory */
+  LvmStatus = LVM_GetMemoryTable(pContext->pBundledContext->hInstance, &MemTab,
+                                 LVM_NULL);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmEffect_free")
+
+  for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
+    if (MemTab.Region[i].Size != 0) {
+      if (MemTab.Region[i].pBaseAddress != NULL) {
+        ALOGV("\tLvmEffect_free - START freeing %" PRIu32
+              " bytes for region %u at %p\n",
+              MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+
+        free(MemTab.Region[i].pBaseAddress);
+
+        ALOGV("\tLvmEffect_free - END   freeing %" PRIu32
+              " bytes for region %u at %p\n",
+              MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+      } else {
+        ALOGE(
+            "\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer "
+            "%" PRIu32 " bytes for region %u at %p ERROR\n",
+            MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+      }
+    }
+  }
+} /* end LvmEffect_free */
+
+//----------------------------------------------------------------------------
+// LvmBundle_init()
+//----------------------------------------------------------------------------
+// Purpose: Initialize engine with default configuration, creates instance
+// with all effects disabled.
+//
+// Inputs:
+//  pContext:   effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int LvmBundle_init(struct EffectContext *pContext, LVM_ControlParams_t *params) {
+  ALOGV("\tLvmBundle_init start");
+
+  pContext->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+  pContext->config.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+  pContext->config.inputCfg.format = EFFECT_BUFFER_FORMAT;
+  pContext->config.inputCfg.samplingRate = 44100;
+  pContext->config.inputCfg.bufferProvider.getBuffer = NULL;
+  pContext->config.inputCfg.bufferProvider.releaseBuffer = NULL;
+  pContext->config.inputCfg.bufferProvider.cookie = NULL;
+  pContext->config.inputCfg.mask = EFFECT_CONFIG_ALL;
+  pContext->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+  pContext->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+  pContext->config.outputCfg.format = EFFECT_BUFFER_FORMAT;
+  pContext->config.outputCfg.samplingRate = 44100;
+  pContext->config.outputCfg.bufferProvider.getBuffer = NULL;
+  pContext->config.outputCfg.bufferProvider.releaseBuffer = NULL;
+  pContext->config.outputCfg.bufferProvider.cookie = NULL;
+  pContext->config.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+  if (pContext->pBundledContext->hInstance != NULL) {
+    ALOGV(
+        "\tLvmBundle_init pContext->pBassBoost != NULL "
+        "-> Calling pContext->pBassBoost->free()");
+
+    LvmEffect_free(pContext);
+
+    ALOGV(
+        "\tLvmBundle_init pContext->pBassBoost != NULL "
+        "-> Called pContext->pBassBoost->free()");
+  }
+
+  LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
+  LVM_InstParams_t InstParams;                 /* Instance parameters */
+  LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS];  /* Equaliser band definitions */
+  LVM_HeadroomParams_t HeadroomParams;         /* Headroom parameters */
+  LVM_HeadroomBandDef_t HeadroomBandDef[LVM_HEADROOM_MAX_NBANDS];
+  LVM_MemTab_t MemTab; /* Memory allocation table */
+  bool bMallocFailure = LVM_FALSE;
+
+  /* Set the capabilities */
+  InstParams.BufferMode = LVM_UNMANAGED_BUFFERS;
+  InstParams.MaxBlockSize = MAX_CALL_SIZE;
+  InstParams.EQNB_NumBands = MAX_NUM_BANDS;
+  InstParams.PSA_Included = LVM_PSA_ON;
+
+  /* Allocate memory, forcing alignment */
+  LvmStatus = LVM_GetMemoryTable(LVM_NULL, &MemTab, &InstParams);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV("\tCreateInstance Succesfully called LVM_GetMemoryTable\n");
+
+  /* Allocate memory */
+  for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
+    if (MemTab.Region[i].Size != 0) {
+      MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
+
+      if (MemTab.Region[i].pBaseAddress == LVM_NULL) {
+        ALOGE(
+            "\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate "
+            "%" PRIu32 " bytes for region %u\n",
+            MemTab.Region[i].Size, i);
+        bMallocFailure = LVM_TRUE;
+        break;
+      } else {
+        ALOGV("\tLvmBundle_init CreateInstance allocated %" PRIu32
+              " bytes for region %u at %p\n",
+              MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+      }
+    }
+  }
+
+  /* If one or more of the memory regions failed to allocate, free the
+   * regions that were successfully allocated and return with an error
+   */
+  if (bMallocFailure == LVM_TRUE) {
+    for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
+      if (MemTab.Region[i].pBaseAddress == LVM_NULL) {
+        ALOGE(
+            "\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate "
+            "%" PRIu32 " bytes for region %u Not freeing\n",
+            MemTab.Region[i].Size, i);
+      } else {
+        ALOGE(
+            "\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated "
+            "%" PRIu32 " bytes for region %u at %p- free\n",
+            MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+        free(MemTab.Region[i].pBaseAddress);
+      }
+    }
+    return -EINVAL;
+  }
+  ALOGV("\tLvmBundle_init CreateInstance Succesfully malloc'd memory\n");
+
+  /* Initialise */
+  pContext->pBundledContext->hInstance = LVM_NULL;
+
+  /* Init sets the instance handle */
+  LvmStatus = LVM_GetInstanceHandle(&pContext->pBundledContext->hInstance,
+                                    &MemTab, &InstParams);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_GetInstanceHandle", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV(
+      "\tLvmBundle_init CreateInstance Succesfully called "
+      "LVM_GetInstanceHandle\n");
+
+  /* Set the initial process parameters */
+  /* General parameters */
+  params->OperatingMode = LVM_MODE_ON;
+  params->SampleRate = LVM_FS_44100;
+  params->SourceFormat = LVM_STEREO;
+  params->SpeakerType = LVM_HEADPHONES;
+
+  pContext->pBundledContext->SampleRate = LVM_FS_44100;
+
+  /* Concert Sound parameters */
+  params->VirtualizerOperatingMode = LVM_MODE_OFF;
+  params->VirtualizerType = LVM_CONCERTSOUND;
+  params->VirtualizerReverbLevel = 100;
+  params->CS_EffectLevel = LVM_CS_EFFECT_NONE;
+
+  /* N-Band Equaliser parameters */
+  params->EQNB_OperatingMode = LVM_EQNB_ON;
+  params->EQNB_NBands = FIVEBAND_NUMBANDS;
+  params->pEQNB_BandDefinition = &BandDefs[0];
+
+  for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+    BandDefs[i].Frequency = EQNB_5BandPresetsFrequencies[i];
+    BandDefs[i].QFactor = EQNB_5BandPresetsQFactors[i];
+    BandDefs[i].Gain = EQNB_5BandSoftPresets[i];
+  }
+
+  /* Volume Control parameters */
+  params->VC_EffectLevel = 0;
+  params->VC_Balance = 0;
+
+  /* Treble Enhancement parameters */
+  params->TE_OperatingMode = LVM_TE_OFF;
+  params->TE_EffectLevel = 0;
+
+  /* PSA Control parameters */
+  params->PSA_Enable = LVM_PSA_OFF;
+  params->PSA_PeakDecayRate = (LVM_PSA_DecaySpeed_en)0;
+
+  /* Bass Enhancement parameters */
+  params->BE_OperatingMode = LVM_BE_ON;
+  params->BE_EffectLevel = 0;
+  params->BE_CentreFreq = LVM_BE_CENTRE_90Hz;
+  params->BE_HPF = LVM_BE_HPF_ON;
+
+  /* PSA Control parameters */
+  params->PSA_Enable = LVM_PSA_OFF;
+  params->PSA_PeakDecayRate = LVM_PSA_SPEED_MEDIUM;
+
+  /* TE Control parameters */
+  params->TE_OperatingMode = LVM_TE_OFF;
+  params->TE_EffectLevel = 0;
+
+  /* Activate the initial settings */
+  LvmStatus =
+      LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV(
+      "\tLvmBundle_init CreateInstance Succesfully called "
+      "LVM_SetControlParameters\n");
+
+  /* Set the headroom parameters */
+  HeadroomBandDef[0].Limit_Low = 20;
+  HeadroomBandDef[0].Limit_High = 4999;
+  HeadroomBandDef[0].Headroom_Offset = 0;
+  HeadroomBandDef[1].Limit_Low = 5000;
+  HeadroomBandDef[1].Limit_High = 24000;
+  HeadroomBandDef[1].Headroom_Offset = 0;
+  HeadroomParams.pHeadroomDefinition = &HeadroomBandDef[0];
+  HeadroomParams.Headroom_OperatingMode = LVM_HEADROOM_ON;
+  HeadroomParams.NHeadroomBands = 2;
+
+  LvmStatus = LVM_SetHeadroomParams(pContext->pBundledContext->hInstance,
+                                    &HeadroomParams);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_SetHeadroomParams", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV(
+      "\tLvmBundle_init CreateInstance Succesfully called "
+      "LVM_SetHeadroomParams\n");
+  ALOGV("\tLvmBundle_init End");
+  return 0;
+} /* end LvmBundle_init */
+
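The cleanup loop above walks every region and frees only the ones that did receive a base address. The same allocate-or-roll-back idea, reduced to standard C++ with hypothetical region sizes instead of the real LVM_MemTab_t, looks roughly like this:

    #include <cstdlib>
    #include <vector>

    // Allocate one block per requested size; on any failure, free the blocks
    // that were already obtained and report the error to the caller.
    static bool allocateRegions(const std::vector<size_t> &sizes,
                                std::vector<void *> *regions) {
        regions->assign(sizes.size(), nullptr);
        for (size_t i = 0; i < sizes.size(); ++i) {
            if (sizes[i] == 0) continue;            // empty regions are allowed
            (*regions)[i] = std::malloc(sizes[i]);
            if ((*regions)[i] == nullptr) {
                for (size_t j = 0; j < i; ++j) {    // roll back earlier successes
                    std::free((*regions)[j]);
                    (*regions)[j] = nullptr;
                }
                return false;
            }
        }
        return true;
    }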
+int lvmCreate(struct EffectContext *pContext,
+              lvmConfigParams_t    *plvmConfigParams,
+              LVM_ControlParams_t  *params) {
+  int ret = 0;
+  pContext->pBundledContext = NULL;
+  pContext->pBundledContext = (BundledEffectContext *)malloc(sizeof(struct BundledEffectContext));
+  if (NULL == pContext->pBundledContext) {
+    return -EINVAL;
+  }
+
+  pContext->pBundledContext->SessionNo = 0;
+  pContext->pBundledContext->SessionId = 0;
+  pContext->pBundledContext->hInstance = NULL;
+  pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
+  pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
+  pContext->pBundledContext->bBassEnabled = LVM_FALSE;
+  pContext->pBundledContext->bBassTempDisabled = LVM_FALSE;
+  pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
+  pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
+  pContext->pBundledContext->nOutputDevice = AUDIO_DEVICE_NONE;
+  pContext->pBundledContext->nVirtualizerForcedDevice = AUDIO_DEVICE_NONE;
+  pContext->pBundledContext->NumberEffectsEnabled = 0;
+  pContext->pBundledContext->NumberEffectsCalled = 0;
+  pContext->pBundledContext->firstVolume = LVM_TRUE;
+  pContext->pBundledContext->volume = 0;
+
+  /* Saved strength is used to return, on a get, the exact strength that was
+   * set: the original 0:1000 strength range is mapped to 1:15, so without the
+   * saved value the caller would see a quantisation-like effect.
+   */
+  pContext->pBundledContext->BassStrengthSaved = 0;
+  pContext->pBundledContext->VirtStrengthSaved = 0;
+  pContext->pBundledContext->CurPreset = PRESET_CUSTOM;
+  pContext->pBundledContext->levelSaved = 0;
+  pContext->pBundledContext->bMuteEnabled = LVM_FALSE;
+  pContext->pBundledContext->bStereoPositionEnabled = LVM_FALSE;
+  pContext->pBundledContext->positionSaved = 0;
+  pContext->pBundledContext->workBuffer = NULL;
+  pContext->pBundledContext->frameCount = -1;
+  pContext->pBundledContext->SamplesToExitCountVirt = 0;
+  pContext->pBundledContext->SamplesToExitCountBb = 0;
+  pContext->pBundledContext->SamplesToExitCountEq = 0;
+#if defined(BUILD_FLOAT) && !defined(NATIVE_FLOAT_BUFFER)
+  pContext->pBundledContext->pInputBuffer = NULL;
+  pContext->pBundledContext->pOutputBuffer = NULL;
+#endif
+  for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+    pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
+  }
+  pContext->config.inputCfg.channels = plvmConfigParams->nrChannels;
+  ALOGV("\tEffectCreate - Calling LvmBundle_init");
+  ret = LvmBundle_init(pContext, params);
+
+  if (ret < 0) {
+    ALOGE("\tLVM_ERROR : lvmCreate() Bundle init failed");
+    return ret;
+  }
+  return 0;
+}
+
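The saved-strength fields above exist because the public 0:1000 strength range is quantised down to a much coarser internal range, so a get after a set would otherwise report a slightly different value. A standalone illustration of that round-trip loss, assuming a simple linear 0:1000 to 1:15 mapping (the exact LVM mapping may differ):

    #include <cstdio>

    // Map the public 0..1000 strength onto a coarse 1..15 range and back.
    static int toDeviceStrength(int strength) { return 1 + (strength * 14) / 1000; }
    static int fromDeviceStrength(int device) { return ((device - 1) * 1000) / 14; }

    int main() {
        const int requested = 333;
        const int device = toDeviceStrength(requested);    // 5
        const int reported = fromDeviceStrength(device);   // 285, not 333
        // Returning the cached 'requested' value instead of 'reported' is what
        // avoids the quantisation-like effect mentioned in the comment above.
        std::printf("requested=%d device=%d round-trip=%d\n",
                    requested, device, reported);
        return 0;
    }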
+int lvmControl(struct EffectContext *pContext,
+               lvmConfigParams_t    *plvmConfigParams,
+               LVM_ControlParams_t  *params) {
+  LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
+  LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS];  /* Equaliser band definitions */
+  int eqPresetLevel = plvmConfigParams->eqPresetLevel;
+  int nrChannels = plvmConfigParams->nrChannels;
+  params->NrChannels = nrChannels;
+
+  /* Set the initial process parameters */
+  /* General parameters */
+  params->OperatingMode = LVM_MODE_ON;
+  params->SampleRate = LVM_FS_44100;
+  params->SourceFormat = LVM_STEREO;
+  params->SpeakerType = LVM_HEADPHONES;
+
+  pContext->pBundledContext->SampleRate = LVM_FS_44100;
+
+  /* Concert Sound parameters */
+  params->VirtualizerOperatingMode = plvmConfigParams->csEnable;
+  params->VirtualizerType = LVM_CONCERTSOUND;
+  params->VirtualizerReverbLevel = 100;
+  params->CS_EffectLevel = LVM_CS_EFFECT_NONE;
+
+  /* N-Band Equaliser parameters */
+  params->EQNB_OperatingMode = plvmConfigParams->eqEnable;
+  params->pEQNB_BandDefinition = &BandDefs[0];
+  for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+    BandDefs[i].Frequency = EQNB_5BandPresetsFrequencies[i];
+    BandDefs[i].QFactor = EQNB_5BandPresetsQFactors[i];
+    BandDefs[i].Gain =
+        EQNB_5BandSoftPresets[(FIVEBAND_NUMBANDS * eqPresetLevel) + i];
+  }
+
+  /* Volume Control parameters */
+  params->VC_EffectLevel = 0;
+  params->VC_Balance = 0;
+
+  /* Treble Enhancement parameters */
+  params->TE_OperatingMode = plvmConfigParams->trebleEnable;
+
+  /* PSA Control parameters */
+  params->PSA_Enable = LVM_PSA_ON;
+
+  /* Bass Enhancement parameters */
+  params->BE_OperatingMode = plvmConfigParams->bassEnable;
+
+  if (nrChannels == 1) {
+    params->SourceFormat = LVM_MONO;
+  }
+  if (nrChannels == 2) {
+    params->SourceFormat = LVM_STEREO;
+  }
+  if ((nrChannels > 2) && (nrChannels <= 8)) {
+    params->SourceFormat = LVM_MULTICHANNEL;
+  }
+
+  /* Activate the initial settings */
+  LvmStatus =
+      LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  LvmStatus = LVM_ApplyNewSettings(pContext->pBundledContext->hInstance);
+
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  return 0;
+}
+
+int lvmExecute(float *floatIn, float *floatOut, struct EffectContext *pContext,
+               lvmConfigParams_t *plvmConfigParams) {
+  const int frameLength = plvmConfigParams->frameLength;
+  return
+      LVM_Process(pContext->pBundledContext->hInstance, /* Instance handle */
+                  floatIn,                              /* Input buffer */
+                  floatOut,                             /* Output buffer */
+                  (LVM_UINT16)frameLength, /* Number of samples to read */
+                  0);                      /* Audio Time */
+}
+
+int lvmMainProcess(lvmConfigParams_t *plvmConfigParams, FILE *finp, FILE *fout) {
+  struct EffectContext context;
+  LVM_ControlParams_t params;
+
+  int errCode = lvmCreate(&context, plvmConfigParams, &params);
+  if (errCode) {
+    ALOGE("Error: lvmCreate returned with %d\n", errCode);
+    return errCode;
+  }
+
+  errCode = lvmControl(&context, plvmConfigParams, &params);
+  if (errCode) {
+    ALOGE("Error: lvmControl returned with %d\n", errCode);
+    return errCode;
+  }
+
+  const int channelCount = plvmConfigParams->nrChannels;
+  const int frameLength = plvmConfigParams->frameLength;
+  const int frameSize = channelCount * sizeof(float);  // processing size
+  const int ioChannelCount = plvmConfigParams->fChannels;
+  const int ioFrameSize = ioChannelCount * sizeof(short); // file load size
+  const int maxChannelCount = std::max(channelCount, ioChannelCount);
+  /*
+   * Mono input will be converted to 2 channels internally in the process call
+   * by copying the same data into the second channel.
+   * Hence when channelCount is 1, output buffer should be allocated for
+   * 2 channels. The memAllocChCount takes care of allocation of sufficient
+   * memory for the output buffer.
+   */
+  const int memAllocChCount = (channelCount == 1 ? 2 : channelCount);
+
+  std::vector<short> in(frameLength * maxChannelCount);
+  std::vector<short> out(frameLength * maxChannelCount);
+  std::vector<float> floatIn(frameLength * channelCount);
+  std::vector<float> floatOut(frameLength * memAllocChCount);
+
+  int frameCounter = 0;
+  while (fread(in.data(), ioFrameSize, frameLength, finp) == (size_t)frameLength) {
+    if (ioChannelCount != channelCount) {
+        adjust_channels(in.data(), ioChannelCount, in.data(), channelCount,
+               sizeof(short), frameLength * ioFrameSize);
+    }
+    memcpy_to_float_from_i16(floatIn.data(), in.data(), frameLength * channelCount);
+
+#if 1
+    errCode = lvmExecute(floatIn.data(), floatOut.data(), &context, plvmConfigParams);
+    if (errCode) {
+      printf("\nError: lvmExecute returned with %d\n", errCode);
+      return errCode;
+    }
+
+    (void)frameSize; // eliminate warning
+#else
+    memcpy(floatOut.data(), floatIn.data(), frameLength * frameSize);
+#endif
+    memcpy_to_i16_from_float(out.data(), floatOut.data(), frameLength * channelCount);
+    if (ioChannelCount != channelCount) {
+        adjust_channels(out.data(), channelCount, out.data(), ioChannelCount,
+               sizeof(short), frameLength * channelCount * sizeof(short));
+    }
+    (void) fwrite(out.data(), ioFrameSize, frameLength, fout);
+    frameCounter += frameLength;
+  }
+  printf("frameCounter: [%d]\n", frameCounter);
+  return 0;
+}
+
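The memAllocChCount note above is the key sizing detail: a mono request is processed as two identical channels, so the float output vector must hold two channels' worth of samples even when channelCount is 1. A self-contained sketch of that duplication, independent of the LVM API:

    #include <cstddef>
    #include <vector>

    // Duplicate an interleaved mono buffer into an interleaved stereo buffer.
    // The destination needs frames * 2 samples, which is why floatOut above is
    // sized with memAllocChCount rather than channelCount.
    static std::vector<float> monoToStereo(const std::vector<float> &mono) {
        std::vector<float> stereo(mono.size() * 2);
        for (size_t i = 0; i < mono.size(); ++i) {
            stereo[2 * i]     = mono[i];  // left
            stereo[2 * i + 1] = mono[i];  // right
        }
        return stereo;
    }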
+int main(int argc, const char *argv[]) {
+  if (argc == 1) {
+    printUsage();
+    return -1;
+  }
+
+  lvmConfigParams_t lvmConfigParams{}; // default initialize
+  FILE *finp = nullptr, *fout = nullptr;
+
+  for (int i = 1; i < argc; i++) {
+    printf("%s ", argv[i]);
+    if (!strncmp(argv[i], "-i:", 3)) {
+      finp = fopen(argv[i] + 3, "rb");
+    } else if (!strncmp(argv[i], "-o:", 3)) {
+      fout = fopen(argv[i] + 3, "wb");
+    } else if (!strncmp(argv[i], "-fs:", 4)) {
+      const int samplingFreq = atoi(argv[i] + 4);
+      if (samplingFreq != 8000 && samplingFreq != 11025 &&
+          samplingFreq != 12000 && samplingFreq != 16000 &&
+          samplingFreq != 22050 && samplingFreq != 24000 &&
+          samplingFreq != 32000 && samplingFreq != 44100 &&
+          samplingFreq != 48000 && samplingFreq != 96000) {
+        ALOGE("\nError: Unsupported Sampling Frequency : %d\n", samplingFreq);
+        return -1;
+      }
+      lvmConfigParams.samplingFreq = samplingFreq;
+    } else if (!strncmp(argv[i], "-ch:", 4)) {
+      const int nrChannels = atoi(argv[i] + 4);
+      if (nrChannels > 8 || nrChannels < 1) {
+        ALOGE("\nError: Unsupported number of channels : %d\n", nrChannels);
+        return -1;
+      }
+      lvmConfigParams.nrChannels = nrChannels;
+    } else if (!strncmp(argv[i], "-fch:", 5)) {
+      const int fChannels = atoi(argv[i] + 5);
+      if (fChannels > 8 || fChannels < 1) {
+        ALOGE("\nError: Unsupported number of file channels : %d\n", fChannels);
+        return -1;
+      }
+      lvmConfigParams.fChannels = fChannels;
+    } else if (!strncmp(argv[i], "-basslvl:", 9)) {
+      const int bassEffectLevel = atoi(argv[i] + 9);
+      if (bassEffectLevel > 15 || bassEffectLevel < 0) {
+        ALOGE("\nError: Unsupported Bass Effect Level : %d\n",
+               bassEffectLevel);
+        printUsage();
+        return -1;
+      }
+      lvmConfigParams.bassEffectLevel = bassEffectLevel;
+    } else if (!strncmp(argv[i], "-eqPreset:", 10)) {
+      const int eqPresetLevel = atoi(argv[i] + 10);
+      if (eqPresetLevel > 9 || eqPresetLevel < 0) {
+        ALOGE("\nError: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
+        printUsage();
+        return -1;
+      }
+      lvmConfigParams.eqPresetLevel = eqPresetLevel;
+    } else if (!strcmp(argv[i], "-bE")) {
+      lvmConfigParams.bassEnable = LVM_BE_ON;
+    } else if (!strcmp(argv[i], "-eqE")) {
+      lvmConfigParams.eqEnable = LVM_EQNB_ON;
+    } else if (!strcmp(argv[i], "-tE")) {
+      lvmConfigParams.trebleEnable = LVM_TE_ON;
+    } else if (!strcmp(argv[i], "-csE")) {
+      lvmConfigParams.csEnable = LVM_MODE_ON;
+    } else if (!strcmp(argv[i], "-h")) {
+      printUsage();
+      return 0;
+    }
+  }
+
+  if (finp == nullptr || fout == nullptr) {
+    ALOGE("\nError: missing input/output files\n");
+    printUsage();
+    // ok not to close.
+    return -1;
+  }
+
+  const int errCode = lvmMainProcess(&lvmConfigParams, finp, fout);
+  fclose(finp);
+  fclose(fout);
+
+  if (errCode) {
+    ALOGE("Error: lvmMainProcess returns with the error: %d \n", errCode);
+    return -1;
+  }
+  return 0;
+}
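For reference, a typical invocation of the resulting test binary would combine the options parsed above, along the lines of the purely illustrative command below; the binary name and file names are not taken from this change:

    lvmtest -i:input_stereo_44k.raw -o:output.raw -fs:44100 -ch:2 -fch:2 -eqE -eqPreset:2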
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index 10fd970..16fa126 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -18,6 +18,7 @@
         "-fvisibility=hidden",
         "-DBUILD_FLOAT",
         "-DHIGHER_FS",
+        "-DSUPPORT_MC",
 
         "-Wall",
         "-Werror",
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 53d266a..09e9964 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -435,7 +435,7 @@
             (pSessionContext->bEqualizerInstantiated ==LVM_FALSE) &&
             (pSessionContext->bVirtualizerInstantiated==LVM_FALSE))
     {
-        #ifdef LVM_PCM
+#ifdef LVM_PCM
         if (pContext->pBundledContext->PcmInPtr != NULL) {
             fclose(pContext->pBundledContext->PcmInPtr);
             pContext->pBundledContext->PcmInPtr = NULL;
@@ -444,7 +444,7 @@
             fclose(pContext->pBundledContext->PcmOutPtr);
             pContext->pBundledContext->PcmOutPtr = NULL;
         }
-        #endif
+#endif
 
 
         // Clear the SessionIndex
@@ -751,19 +751,21 @@
 
     LVM_ReturnStatus_en     LvmStatus = LVM_SUCCESS;                /* Function call status */
     effect_buffer_t         *pOutTmp;
+    const LVM_INT32 NrChannels =
+        audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
 #ifndef NATIVE_FLOAT_BUFFER
     if (pContext->pBundledContext->pInputBuffer == nullptr ||
             pContext->pBundledContext->frameCount < frameCount) {
         free(pContext->pBundledContext->pInputBuffer);
         pContext->pBundledContext->pInputBuffer =
-                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * FCC_2);
+                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * NrChannels);
     }
 
     if (pContext->pBundledContext->pOutputBuffer == nullptr ||
             pContext->pBundledContext->frameCount < frameCount) {
         free(pContext->pBundledContext->pOutputBuffer);
         pContext->pBundledContext->pOutputBuffer =
-                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * FCC_2);
+                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * NrChannels);
     }
 
     if (pContext->pBundledContext->pInputBuffer == nullptr ||
@@ -784,7 +786,7 @@
                 free(pContext->pBundledContext->workBuffer);
             }
             pContext->pBundledContext->workBuffer =
-                    (effect_buffer_t *)calloc(frameCount, sizeof(effect_buffer_t) * FCC_2);
+                    (effect_buffer_t *)calloc(frameCount, sizeof(effect_buffer_t) * NrChannels);
             if (pContext->pBundledContext->workBuffer == NULL) {
                 return -ENOMEM;
             }
@@ -798,13 +800,15 @@
 
 #ifdef LVM_PCM
     fwrite(pIn,
-            frameCount*sizeof(effect_buffer_t) * FCC_2, 1, pContext->pBundledContext->PcmInPtr);
+           frameCount * sizeof(effect_buffer_t) * NrChannels,
+           1,
+           pContext->pBundledContext->PcmInPtr);
     fflush(pContext->pBundledContext->PcmInPtr);
 #endif
 
 #ifndef NATIVE_FLOAT_BUFFER
     /* Converting input data from fixed point to float point */
-    memcpy_to_float_from_i16(pInputBuff, pIn, frameCount * FCC_2);
+    memcpy_to_float_from_i16(pInputBuff, pIn, frameCount * NrChannels);
 
     /* Process the samples */
     LvmStatus = LVM_Process(pContext->pBundledContext->hInstance, /* Instance handle */
@@ -814,7 +818,7 @@
                             0);                                   /* Audio Time */
 
     /* Converting output data from float point to fixed point */
-    memcpy_to_i16_from_float(pOutTmp, pOutputBuff, frameCount * FCC_2);
+    memcpy_to_i16_from_float(pOutTmp, pOutputBuff, frameCount * NrChannels);
 
 #else
     /* Process the samples */
@@ -829,12 +833,14 @@
 
 #ifdef LVM_PCM
     fwrite(pOutTmp,
-            frameCount*sizeof(effect_buffer_t) * FCC_2, 1, pContext->pBundledContext->PcmOutPtr);
+           frameCount * sizeof(effect_buffer_t) * NrChannels,
+           1,
+           pContext->pBundledContext->PcmOutPtr);
     fflush(pContext->pBundledContext->PcmOutPtr);
 #endif
 
     if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
-        for (int i = 0; i < frameCount * FCC_2; i++) {
+        for (int i = 0; i < frameCount * NrChannels; i++) {
 #ifndef NATIVE_FLOAT_BUFFER
             pOut[i] = clamp16((LVM_INT32)pOut[i] + (LVM_INT32)pOutTmp[i]);
 #else
@@ -1232,45 +1238,50 @@
     CHECK_ARG(pConfig->inputCfg.samplingRate == pConfig->outputCfg.samplingRate);
     CHECK_ARG(pConfig->inputCfg.channels == pConfig->outputCfg.channels);
     CHECK_ARG(pConfig->inputCfg.format == pConfig->outputCfg.format);
+#ifdef SUPPORT_MC
+    CHECK_ARG(audio_channel_count_from_out_mask(pConfig->inputCfg.channels) <= LVM_MAX_CHANNELS);
+#else
     CHECK_ARG(pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO);
+#endif
     CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
               || pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
     CHECK_ARG(pConfig->inputCfg.format == EFFECT_BUFFER_FORMAT);
     pContext->config = *pConfig;
+    const LVM_INT16 NrChannels = audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
 
     switch (pConfig->inputCfg.samplingRate) {
     case 8000:
         SampleRate = LVM_FS_8000;
-        pContext->pBundledContext->SamplesPerSecond = 8000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 8000 * NrChannels;
         break;
     case 16000:
         SampleRate = LVM_FS_16000;
-        pContext->pBundledContext->SamplesPerSecond = 16000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 16000 * NrChannels;
         break;
     case 22050:
         SampleRate = LVM_FS_22050;
-        pContext->pBundledContext->SamplesPerSecond = 22050*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 22050 * NrChannels;
         break;
     case 32000:
         SampleRate = LVM_FS_32000;
-        pContext->pBundledContext->SamplesPerSecond = 32000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 32000 * NrChannels;
         break;
     case 44100:
         SampleRate = LVM_FS_44100;
-        pContext->pBundledContext->SamplesPerSecond = 44100*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 44100 * NrChannels;
         break;
     case 48000:
         SampleRate = LVM_FS_48000;
-        pContext->pBundledContext->SamplesPerSecond = 48000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 48000 * NrChannels;
         break;
 #if defined(BUILD_FLOAT) && defined(HIGHER_FS)
     case 96000:
         SampleRate = LVM_FS_96000;
-        pContext->pBundledContext->SamplesPerSecond = 96000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 96000 * NrChannels;
         break;
     case 192000:
         SampleRate = LVM_FS_192000;
-        pContext->pBundledContext->SamplesPerSecond = 192000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 192000 * NrChannels;
         break;
 #endif
     default:
@@ -1294,6 +1305,10 @@
 
         ActiveParams.SampleRate = SampleRate;
 
+#ifdef SUPPORT_MC
+        ActiveParams.NrChannels = NrChannels;
+#endif
+
         LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
 
         LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig")
@@ -1498,6 +1513,7 @@
     case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
     case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
     case AUDIO_DEVICE_OUT_USB_HEADSET:
+    // case AUDIO_DEVICE_OUT_USB_DEVICE:  // For USB testing of the virtualizer only.
         return 0;
     default :
         return -EINVAL;
@@ -1520,10 +1536,9 @@
 int VirtualizerIsConfigurationSupported(audio_channel_mask_t channelMask,
         audio_devices_t deviceType) {
     uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
-    if ((channelCount == 0) || (channelCount > 2)) {
+    if (channelCount < 1 || channelCount > LVM_MAX_CHANNELS) {
         return -EINVAL;
     }
-
     return VirtualizerIsDeviceSupported(deviceType);
 }
 
@@ -3216,6 +3231,7 @@
     EffectContext * pContext = (EffectContext *) self;
     int    status = 0;
     int    processStatus = 0;
+    const int NrChannels = audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
 
 //ALOGV("\tEffect_process Start : Enabled = %d     Called = %d (%8d %8d %8d)",
 //pContext->pBundledContext->NumberEffectsEnabled,pContext->pBundledContext->NumberEffectsCalled,
@@ -3246,7 +3262,7 @@
         (pContext->EffectType == LVM_BASS_BOOST)){
         //ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
         if(pContext->pBundledContext->SamplesToExitCountBb > 0){
-            pContext->pBundledContext->SamplesToExitCountBb -= outBuffer->frameCount * 2; // STEREO
+            pContext->pBundledContext->SamplesToExitCountBb -= outBuffer->frameCount * NrChannels;
             //ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountBb);
         }
@@ -3266,7 +3282,7 @@
         (pContext->EffectType == LVM_EQUALIZER)){
         //ALOGV("\tEffect_process() LVM_EQUALIZER Effect is not enabled");
         if(pContext->pBundledContext->SamplesToExitCountEq > 0){
-            pContext->pBundledContext->SamplesToExitCountEq -= outBuffer->frameCount * 2; // STEREO
+            pContext->pBundledContext->SamplesToExitCountEq -= outBuffer->frameCount * NrChannels;
             //ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountEq);
         }
@@ -3280,7 +3296,8 @@
         (pContext->EffectType == LVM_VIRTUALIZER)){
         //ALOGV("\tEffect_process() LVM_VIRTUALIZER Effect is not enabled");
         if(pContext->pBundledContext->SamplesToExitCountVirt > 0){
-            pContext->pBundledContext->SamplesToExitCountVirt -= outBuffer->frameCount * 2;// STEREO
+            pContext->pBundledContext->SamplesToExitCountVirt -=
+                outBuffer->frameCount * NrChannels;
             //ALOGV("\tEffect_process: Waiting for to turn off VIRTUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountVirt);
         }
@@ -3331,7 +3348,7 @@
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
 
         if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
-            for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+            for (size_t i = 0; i < outBuffer->frameCount * NrChannels; ++i) {
 #ifdef NATIVE_FLOAT_BUFFER
                 outBuffer->f32[i] += inBuffer->f32[i];
 #else
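The recurring FCC_2 to NrChannels substitution in this file amounts to deriving the per-frame sample count from the configured channel mask instead of hard-coding stereo. A minimal sketch of that sizing computation, using only audio_channel_count_from_out_mask() from <system/audio.h>, which the patch itself already relies on:

    #include <cstddef>
    #include <cstdint>

    #include <system/audio.h>

    // Bytes needed for an interleaved float buffer of frameCount frames under
    // an arbitrary output channel mask (no longer assumed to be stereo).
    static size_t floatBufferBytes(audio_channel_mask_t mask, size_t frameCount) {
        const uint32_t channels = audio_channel_count_from_out_mask(mask);
        return frameCount * channels * sizeof(float);
    }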
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 3d9e62e..6002e95 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -113,9 +113,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -144,9 +141,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -267,9 +261,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -331,8 +322,5 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index e0f5a40..08c6a50 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -48,7 +48,8 @@
     {"amrwb",  AUDIO_ENCODER_AMR_WB},
     {"aac",    AUDIO_ENCODER_AAC},
     {"heaac",  AUDIO_ENCODER_HE_AAC},
-    {"aaceld", AUDIO_ENCODER_AAC_ELD}
+    {"aaceld", AUDIO_ENCODER_AAC_ELD}, 
+    {"opus",   AUDIO_ENCODER_OPUS}
 };
 
 const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
diff --git a/media/libmedia/NdkMediaFormatPriv.cpp b/media/libmedia/NdkMediaFormatPriv.cpp
index 3c84d6a..3a9fb8b 100644
--- a/media/libmedia/NdkMediaFormatPriv.cpp
+++ b/media/libmedia/NdkMediaFormatPriv.cpp
@@ -19,27 +19,22 @@
 
 #include <inttypes.h>
 
-//#include <ndk/include/media/NdkMediaFormat.h>
-
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
 #include <media/NdkMediaFormatPriv.h>
-#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AMessage.h>
-//#include <android_runtime/AndroidRuntime.h>
-//#include <android_util_Binder.h>
 
 #include <jni.h>
 
 using namespace android;
 
-extern "C" {
+namespace android {
 
 // private functions for conversion to/from AMessage
-AMediaFormat* AMediaFormat_fromMsg(const void* data) {
+AMediaFormat* AMediaFormat_fromMsg(sp<AMessage> *data) {
     ALOGV("private ctor");
     AMediaFormat* mData = new AMediaFormat();
-    mData->mFormat = *((sp<AMessage>*)data);
+    mData->mFormat = *data;
     if (mData->mFormat == NULL) {
         ALOGW("got NULL format");
         mData->mFormat = new AMessage;
@@ -47,10 +42,10 @@
     return mData;
 }
 
-void AMediaFormat_getFormat(const AMediaFormat* mData, void* dest) {
-    *((sp<AMessage>*)dest) = mData->mFormat;
+void AMediaFormat_getFormat(const AMediaFormat* mData, sp<AMessage> *dest) {
+    *dest = mData->mFormat;
 }
 
-} // extern "C"
+} // namespace android
 
 
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 514c795..fb861d7 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -233,6 +233,12 @@
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1POINT2),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1POINT4),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_HAPTIC_A),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO_HAPTIC_A),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_HAPTIC_AB),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB),
     TERMINATOR
 };
 
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 4984b18..cb8d375 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -56,6 +56,19 @@
     setCaptureCallBack(NULL, NULL, 0, 0);
 }
 
+void Visualizer::release()
+{
+    ALOGV("Visualizer::release()");
+    setEnabled(false);
+    Mutex::Autolock _l(mCaptureLock);
+
+    mCaptureThread.clear();
+    mCaptureCallBack = NULL;
+    mCaptureCbkUser = NULL;
+    mCaptureFlags = 0;
+    mCaptureRate = 0;
+}
+
 status_t Visualizer::setEnabled(bool enabled)
 {
     Mutex::Autolock _l(mCaptureLock);
@@ -115,7 +128,7 @@
     mCaptureRate = rate;
 
     if (cbk != NULL) {
-        mCaptureThread = new CaptureThread(*this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
+        mCaptureThread = new CaptureThread(this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
     }
     ALOGV("setCaptureCallBack() rate: %d thread %p flags 0x%08x",
             rate, mCaptureThread.get(), mCaptureFlags);
@@ -402,7 +415,7 @@
 
 //-------------------------------------------------------------------------
 
-Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate,
+Visualizer::CaptureThread::CaptureThread(Visualizer* receiver, uint32_t captureRate,
         bool bCanCallJava)
     : Thread(bCanCallJava), mReceiver(receiver)
 {
@@ -413,10 +426,14 @@
 bool Visualizer::CaptureThread::threadLoop()
 {
     ALOGV("CaptureThread %p enter", this);
+    sp<Visualizer> receiver = mReceiver.promote();
+    if (receiver == NULL) {
+        return false;
+    }
     while (!exitPending())
     {
         usleep(mSleepTimeUs);
-        mReceiver.periodicCapture();
+        receiver->periodicCapture();
     }
     ALOGV("CaptureThread %p exiting", this);
     return false;
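The constructor change from Visualizer& to Visualizer*, together with the wp<Visualizer> member, means the capture thread no longer keeps its owner alive and has to promote the weak reference before every use. A generic sketch of that pattern with libutils smart pointers; Owner and Worker are illustrative names, not the real classes:

    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    using android::RefBase;
    using android::sp;
    using android::wp;

    struct Owner : public RefBase {
        void doPeriodicWork() { /* ... */ }
    };

    struct Worker {
        explicit Worker(const sp<Owner> &owner) : mOwner(owner) {}

        bool tick() {
            sp<Owner> owner = mOwner.promote();  // null once the Owner is gone
            if (owner == nullptr) {
                return false;                    // stop: owner was destroyed
            }
            owner->doPeriodicWork();
            return true;
        }

        wp<Owner> mOwner;  // weak: does not extend the owner's lifetime
    };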
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index d6a9cfb..ea41527 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -78,6 +78,7 @@
 
     // Overrides PassthruBufferProvider
     virtual void reset();
+    void setBufferProvider(AudioBufferProvider *p) override;
 
     // this function should be supplied by the derived class.  It converts
     // #frames in the *src pointer to the *dst pointer.  It is public because
@@ -186,6 +187,7 @@
 
     // Overrides PassthruBufferProvider
     virtual void reset();
+    void setBufferProvider(AudioBufferProvider *p) override;
 
     virtual status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
 
@@ -216,6 +218,53 @@
     bool                 mAudioPlaybackRateValid; // flag for current parameters validity
 };
 
+// AdjustChannelsBufferProvider derives from CopyBufferProvider to adjust sample data.
+// Expands or contracts sample data from one interleaved channel format to another.
+// Expanded channels are filled with zeros and put at the end of each audio frame.
+// Contracted channels are omitted from the end of each audio frame.
+class AdjustChannelsBufferProvider : public CopyBufferProvider {
+public:
+    AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
+            size_t outChannelCount, size_t frameCount);
+    //Overrides
+    void copyFrames(void *dst, const void *src, size_t frames) override;
+
+protected:
+    const audio_format_t mFormat;
+    const size_t         mInChannelCount;
+    const size_t         mOutChannelCount;
+    const size_t         mSampleSizeInBytes;
+};
+
+// AdjustChannelsNonDestructiveBufferProvider derives from CopyBufferProvider to adjust sample data.
+// Expands or contracts sample data from one interleaved channel format to another.
+// Extra expanded channels are interleaved in from the end of the input buffer.
+// Contracted channels are copied to the end of the output buffer.
+// Contracted channels can also be written out to a caller-supplied contracted buffer.
+class AdjustChannelsNonDestructiveBufferProvider : public CopyBufferProvider {
+public:
+    AdjustChannelsNonDestructiveBufferProvider(audio_format_t format, size_t inChannelCount,
+            size_t outChannelCount, audio_format_t contractedFormat, size_t contractedFrameCount,
+            void* contractedBuffer);
+    //Overrides
+    status_t getNextBuffer(Buffer* pBuffer) override;
+    void copyFrames(void *dst, const void *src, size_t frames) override;
+    void reset() override;
+
+    void clearContractedFrames() { mContractedWrittenFrames = 0; }
+
+protected:
+    const audio_format_t mFormat;
+    const size_t         mInChannelCount;
+    const size_t         mOutChannelCount;
+    const size_t         mSampleSizeInBytes;
+    const size_t         mContractedChannelCount;
+    const audio_format_t mContractedFormat;
+    const size_t         mContractedFrameCount;
+    void                *mContractedBuffer;
+    size_t               mContractedWrittenFrames;
+    size_t               mContractedFrameSize;
+};
 // ----------------------------------------------------------------------------
 } // namespace android
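Both providers declared above perform the same core transformation: re-interleave each frame from one channel count to another, zero-filling any added channels and dropping trailing ones. A standalone illustration of that per-frame copy, with no claim of matching the real copyFrames() implementations:

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    // Re-interleave 'frames' float frames from inCh channels to outCh channels.
    // Added output channels are zero-filled; extra input channels are dropped.
    static void adjustChannels(const float *src, size_t inCh,
                               float *dst, size_t outCh, size_t frames) {
        const size_t copyCh = std::min(inCh, outCh);
        for (size_t f = 0; f < frames; ++f) {
            std::memcpy(dst + f * outCh, src + f * inCh, copyCh * sizeof(float));
            std::fill(dst + f * outCh + copyCh, dst + (f + 1) * outCh, 0.0f);
        }
    }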
 
diff --git a/media/libmedia/include/media/Visualizer.h b/media/libmedia/include/media/Visualizer.h
index f8f4f50..8078e36 100644
--- a/media/libmedia/include/media/Visualizer.h
+++ b/media/libmedia/include/media/Visualizer.h
@@ -131,6 +131,7 @@
     // getCaptureSize() but the length of the FFT is half of the size (both parts of the spectrum
     // are returned
     status_t getFft(uint8_t *fft);
+    void release();
 
 protected:
     // from IEffectClient
@@ -146,12 +147,12 @@
     class CaptureThread : public Thread
     {
     public:
-        CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava = false);
+        CaptureThread(Visualizer* visualizer, uint32_t captureRate, bool bCanCallJava = false);
 
     private:
         friend class Visualizer;
         virtual bool        threadLoop();
-        Visualizer& mReceiver;
+        wp<Visualizer> mReceiver;
         Mutex       mLock;
         uint32_t mSleepTimeUs;
     };
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index d8b0fe7..bdf1aae 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -67,7 +67,7 @@
     OUTPUT_FORMAT_AAC_ADTS = 6,
 
     OUTPUT_FORMAT_AUDIO_ONLY_END = 7, // Used in validating the output format.  Should be the
-                                      //  at the end of the audio only output formats.
+                                      // at the end of the audio only output formats.
 
     /* Stream over a socket, limited to a single stream */
     OUTPUT_FORMAT_RTP_AVP = 7,
@@ -81,6 +81,9 @@
     /* HEIC data in a HEIF container */
     OUTPUT_FORMAT_HEIF = 10,
 
+    /* Opus data in an Ogg container */
+    OUTPUT_FORMAT_OGG = 11,
+
     OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
 };
 
@@ -92,6 +95,7 @@
     AUDIO_ENCODER_HE_AAC = 4,
     AUDIO_ENCODER_AAC_ELD = 5,
     AUDIO_ENCODER_VORBIS = 6,
+    AUDIO_ENCODER_OPUS = 7,
 
     AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
 };
diff --git a/media/libmediaextractor/Android.bp b/media/libmediaextractor/Android.bp
index 0871d60..6f2b35f 100644
--- a/media/libmediaextractor/Android.bp
+++ b/media/libmediaextractor/Android.bp
@@ -48,8 +48,5 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index d197b3f..26d0bd4 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -39,8 +39,7 @@
       mRangeOffset(0),
       mRangeLength(size),
       mOwnsData(false),
-      mMetaData(new MetaDataBase),
-      mOriginal(NULL) {
+      mMetaData(new MetaDataBase) {
 }
 
 MediaBuffer::MediaBuffer(size_t size)
@@ -51,8 +50,7 @@
       mRangeOffset(0),
       mRangeLength(size),
       mOwnsData(true),
-      mMetaData(new MetaDataBase),
-      mOriginal(NULL) {
+      mMetaData(new MetaDataBase) {
     if (size < kSharedMemThreshold
             || std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
         mData = malloc(size);
@@ -84,8 +82,7 @@
       mRangeLength(mSize),
       mBuffer(buffer),
       mOwnsData(false),
-      mMetaData(new MetaDataBase),
-      mOriginal(NULL) {
+      mMetaData(new MetaDataBase) {
 }
 
 void MediaBuffer::release() {
@@ -162,11 +159,6 @@
         mData = NULL;
     }
 
-    if (mOriginal != NULL) {
-        mOriginal->release();
-        mOriginal = NULL;
-    }
-
    if (mMemory.get() != nullptr) {
        getSharedControl()->setDeadObject();
    }
@@ -178,15 +170,4 @@
     mObserver = observer;
 }
 
-MediaBufferBase *MediaBuffer::clone() {
-    MediaBuffer *buffer = new MediaBuffer(mData, mSize);
-    buffer->set_range(mRangeOffset, mRangeLength);
-    buffer->mMetaData = new MetaDataBase(*mMetaData);
-
-    add_ref();
-    buffer->mOriginal = this;
-
-    return buffer;
-}
-
 }  // namespace android
diff --git a/media/libmediaextractor/MediaBufferGroup.cpp b/media/libmediaextractor/MediaBufferGroup.cpp
index 62b83cc..4e6beca 100644
--- a/media/libmediaextractor/MediaBufferGroup.cpp
+++ b/media/libmediaextractor/MediaBufferGroup.cpp
@@ -44,12 +44,16 @@
 };
 
 MediaBufferGroup::MediaBufferGroup(size_t growthLimit)
-    : mInternal(new InternalData()) {
+    : mWrapper(nullptr), mInternal(new InternalData()) {
     mInternal->mGrowthLimit = growthLimit;
 }
 
 MediaBufferGroup::MediaBufferGroup(size_t buffers, size_t buffer_size, size_t growthLimit)
-    : mInternal(new InternalData()) {
+    : mWrapper(nullptr), mInternal(new InternalData()) {
+    init(buffers, buffer_size, growthLimit);
+}
+
+void MediaBufferGroup::init(size_t buffers, size_t buffer_size, size_t growthLimit) {
     mInternal->mGrowthLimit = growthLimit;
 
     if (mInternal->mGrowthLimit > 0 && buffers > mInternal->mGrowthLimit) {
diff --git a/media/libmediaextractor/include/media/DataSource.h b/media/libmediaextractor/include/media/DataSource.h
index cb96ff5..1f7a473 100644
--- a/media/libmediaextractor/include/media/DataSource.h
+++ b/media/libmediaextractor/include/media/DataSource.h
@@ -22,7 +22,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/DataSourceBase.h>
 #include <media/IDataSource.h>
-#include <media/MediaExtractorPluginHelper.h>
+#include <media/MediaExtractorPluginApi.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <utils/threads.h>
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index 5a25965..5b362a4 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -80,11 +80,6 @@
 
     virtual void setObserver(MediaBufferObserver *group);
 
-    // Returns a clone of this MediaBuffer increasing its reference count.
-    // The clone references the same data but has its own range and
-    // MetaData.
-    virtual MediaBufferBase *clone();
-
     // sum of localRefcount() and remoteRefcount()
     // Result should be treated as approximate unless the result precludes concurrent accesses.
     virtual int refcount() const {
@@ -158,8 +153,6 @@
 
     MetaDataBase* mMetaData;
 
-    MediaBuffer *mOriginal;
-
     static std::atomic_int_least32_t mUseSharedMemory;
 
     MediaBuffer(const MediaBuffer &);
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
index 6c8d94a..eb49f4c 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
@@ -18,6 +18,10 @@
 
 #define MEDIA_BUFFER_BASE_H_
 
+#include <media/MediaExtractorPluginApi.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/NdkMediaFormatPriv.h>
+
 namespace android {
 
 class MediaBufferBase;
@@ -66,17 +70,54 @@
 
     virtual void setObserver(MediaBufferObserver *group) = 0;
 
-    // Returns a clone of this MediaBufferBase increasing its reference
-    // count. The clone references the same data but has its own range and
-    // MetaData.
-    virtual MediaBufferBase *clone() = 0;
-
     virtual int refcount() const = 0;
 
     virtual int localRefcount() const = 0;
     virtual int remoteRefcount() const = 0;
 
     virtual ~MediaBufferBase() {};
+
+    CMediaBufferV3 *wrap() {
+        if (mWrapper) {
+            return mWrapper;
+        }
+        mWrapper = new CMediaBufferV3;
+        mWrapper->handle = this;
+
+        mWrapper->release = [](void *handle) -> void {
+            ((MediaBufferBase*)handle)->release();
+        };
+
+        mWrapper->data = [](void *handle) -> void * {
+            return ((MediaBufferBase*)handle)->data();
+        };
+
+        mWrapper->size = [](void *handle) -> size_t {
+            return ((MediaBufferBase*)handle)->size();
+        };
+
+        mWrapper->set_range = [](void *handle, size_t offset, size_t length) -> void {
+            return ((MediaBufferBase*)handle)->set_range(offset, length);
+        };
+
+        mWrapper->meta_data = [](void *handle) -> AMediaFormat* {
+            if (((MediaBufferBase*)handle)->mFormat == nullptr) {
+                sp<AMessage> msg = new AMessage();
+                ((MediaBufferBase*)handle)->mFormat = AMediaFormat_fromMsg(&msg);
+            }
+            return ((MediaBufferBase*)handle)->mFormat;
+        };
+
+        return mWrapper;
+    }
+protected:
+    MediaBufferBase() {
+        mWrapper = nullptr;
+        mFormat = nullptr;
+    }
+private:
+    CMediaBufferV3 *mWrapper;
+    AMediaFormat *mFormat;
 };
 
 }  // namespace android
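wrap() above lazily builds a plain C struct whose function pointers forward into the C++ object, giving extractor plugins a stable, C-only view of the buffer. The same handle-plus-captureless-lambdas pattern, reduced to a self-contained sketch with a hypothetical CThing struct (the real CMediaBufferV3 layout lives in MediaExtractorPluginApi.h):

    #include <cstddef>
    #include <string>

    // Hypothetical stable C view of an object: an opaque handle plus callbacks.
    struct CThing {
        void   *handle;
        size_t (*size)(void *handle);
        void   (*release)(void *handle);
    };

    class Thing {
    public:
        explicit Thing(std::string payload) : mPayload(std::move(payload)) {}

        // Captureless lambdas decay to plain function pointers and recover the
        // C++ object from the opaque handle, mirroring MediaBufferBase::wrap().
        CThing *wrap() {
            if (mWrapper.handle == nullptr) {
                mWrapper.handle = this;
                mWrapper.size = [](void *h) -> size_t {
                    return static_cast<Thing *>(h)->mPayload.size();
                };
                mWrapper.release = [](void *h) {
                    delete static_cast<Thing *>(h);
                };
            }
            return &mWrapper;
        }

    private:
        std::string mPayload;
        CThing mWrapper{};
    };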
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
index 75d5df7..dc04556 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
@@ -20,6 +20,8 @@
 
 #include <list>
 
+#include <media/MediaExtractorPluginApi.h>
+#include <media/NdkMediaErrorPriv.h>
 #include <media/stagefright/MediaBufferBase.h>
 #include <utils/Errors.h>
 #include <utils/threads.h>
@@ -57,12 +59,54 @@
     // If buffer is nullptr, have acquire_buffer() check for remote release.
     virtual void signalBufferReturned(MediaBufferBase *buffer);
 
+    CMediaBufferGroupV3 *wrap() {
+        if (mWrapper) {
+            return mWrapper;
+        }
+
+        mWrapper = new CMediaBufferGroupV3;
+        mWrapper->handle = this;
+
+        mWrapper->add_buffer = [](void *handle, size_t size) -> void {
+            MediaBufferBase *buf = MediaBufferBase::Create(size);
+            ((MediaBufferGroup*)handle)->add_buffer(buf);
+        };
+
+        mWrapper->init = [](void *handle,
+                size_t buffers, size_t buffer_size, size_t growthLimit) -> bool {
+            ((MediaBufferGroup*)handle)->init(buffers, buffer_size, growthLimit);
+          //  ((MediaBufferGroup*)handle)->mWrapper->init = nullptr; // enforce call-once
+            return true;
+        };
+
+        mWrapper->acquire_buffer = [](void *handle,
+                CMediaBufferV3 **buf, bool nonBlocking, size_t requestedSize) -> media_status_t {
+            MediaBufferBase *acquiredBuf = nullptr;
+            status_t err = ((MediaBufferGroup*)handle)->acquire_buffer(
+                    &acquiredBuf, nonBlocking, requestedSize);
+            if (err == OK && acquiredBuf != nullptr) {
+                *buf = acquiredBuf->wrap();
+            } else {
+                *buf = nullptr;
+            }
+            return translate_error(err);
+        };
+
+        mWrapper->has_buffers = [](void *handle) -> bool {
+            return ((MediaBufferGroup*)handle)->has_buffers();
+        };
+
+        return mWrapper;
+    }
+
 private:
+    CMediaBufferGroupV3 *mWrapper;
     struct InternalData;
     InternalData *mInternal;
 
     MediaBufferGroup(const MediaBufferGroup &);
     MediaBufferGroup &operator=(const MediaBufferGroup &);
+    void init(size_t buffers, size_t buffer_size, size_t growthLimit);
 };
 
 }  // namespace android
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 07e124b..8f8c478 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -6,15 +6,16 @@
     srcs: [
         "IMediaAnalyticsService.cpp",
         "MediaAnalyticsItem.cpp",
+        "MediaMetrics.cpp",
     ],
 
     shared_libs: [
-        "liblog",
-        "libcutils",
-        "libutils",
-        "libbinder",
-        "libstagefright_foundation",
         "libbase",
+        "libbinder",
+        "libcutils",
+        "liblog",
+        "libstagefright_foundation",
+        "libutils",
     ],
 
     export_include_dirs: ["include"],
@@ -31,8 +32,14 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
+
+    // enumerate the stable interface
+// this would mean nobody can use the C++ interface. have to rework some things.
+//  stubs: {
+//      symbol_file: "libmediametrics.map.txt",
+//      versions: [
+//          "1" ,
+//      ]
+//  },
 }
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index d3de01e..448e2d9 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -487,6 +487,18 @@
     return true;
 }
 
+bool MediaAnalyticsItem::getString(MediaAnalyticsItem::Attr name, std::string *value) {
+    Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeCString) {
+        return false;
+    }
+    if (value != NULL) {
+        // std::string makes a copy for us
+        *value = prop->u.CStringValue;
+    }
+    return true;
+}
+
 // remove indicated keys and their values
 // return value is # keys removed
 int32_t MediaAnalyticsItem::filter(int n, MediaAnalyticsItem::Attr attrs[]) {
@@ -726,6 +738,15 @@
 }
 
 
+const char *MediaAnalyticsItem::toCString() {
+   return toCString(PROTO_LAST);
+}
+
+const char * MediaAnalyticsItem::toCString(int version) {
+    std::string val = toString(version);
+    return strdup(val.c_str());
+}
+
 std::string MediaAnalyticsItem::toString() {
    return toString(PROTO_LAST);
 }
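Unlike toString(), the new toCString() hands back a strdup()'d copy, so the caller owns it and must free it; a minimal usage sketch:

    #include <cstdio>
    #include <cstdlib>

    #include <media/MediaAnalyticsItem.h>

    void dumpItem(android::MediaAnalyticsItem &item) {
        const char *s = item.toCString();  // strdup()'d copy, owned by the caller
        std::printf("%s\n", s);
        std::free(const_cast<char *>(s));
    }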
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
new file mode 100644
index 0000000..9b08aa7
--- /dev/null
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaMetrics"
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <media/MediaAnalyticsItem.h>
+#include <media/MediaMetrics.h>
+
+//
+// provide a C-ish interface that is easier to stabilize than the existing C++
+// interface
+//
+// ALL functions returning a char * give responsibility for the allocated buffer
+// to the caller. The caller is responsible for calling free() on that pointer.
+//
+
+// manage the overall record
+mediametrics_handle_t mediametrics_create(mediametricskey_t key) {
+    android::MediaAnalyticsItem *item = new android::MediaAnalyticsItem(key);
+    return (mediametrics_handle_t) item;
+}
+
+void mediametrics_delete(mediametrics_handle_t handle) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return;
+    delete item;
+}
+
+mediametricskey_t mediametrics_getKey(mediametrics_handle_t handle) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return NULL;
+    return strdup(item->getKey().c_str());
+}
+
+// nuplayer, et al use it when acting as proxies
+void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->setUid(uid);
+}
+
+// set attributes
+//
+
+void mediametrics_setInt32(mediametrics_handle_t handle, attr_t attr,
+                                int32_t value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->setInt32(attr, value);
+}
+
+void mediametrics_setInt64(mediametrics_handle_t handle, attr_t attr,
+                                int64_t value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->setInt64(attr, value);
+}
+
+void mediametrics_setDouble(mediametrics_handle_t handle, attr_t attr,
+                                 double value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->setDouble(attr, value);
+}
+
+void mediametrics_setRate(mediametrics_handle_t handle, attr_t attr,
+                               int64_t count, int64_t duration) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->setRate(attr, count, duration);
+}
+
+void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
+                                 const char *value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->setCString(attr, value);
+}
+
+// fused get/add/set; if attr wasn't there, it's a simple set.
+//
+
+void mediametrics_addInt32(mediametrics_handle_t handle, attr_t attr,
+                                int32_t value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->addInt32(attr, value);
+}
+
+void mediametrics_addInt64(mediametrics_handle_t handle, attr_t attr,
+                                int64_t value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->addInt64(attr, value);
+}
+
+void mediametrics_addDouble(mediametrics_handle_t handle, attr_t attr,
+                                 double value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->addDouble(attr, value);
+}
+
+void mediametrics_addRate(mediametrics_handle_t handle, attr_t attr,
+                               int64_t count, int64_t duration) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item != NULL) item->addRate(attr, count, duration);
+}
+
+// find & extract values
+// return indicates whether attr exists (and thus whether value filled in)
+// NULL parameter value suppresses storage of value.
+//
+
+bool mediametrics_getInt32(mediametrics_handle_t handle, attr_t attr,
+                                int32_t * value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return false;
+    return item->getInt32(attr, value);
+}
+
+bool mediametrics_getInt64(mediametrics_handle_t handle, attr_t attr,
+                                int64_t * value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return false;
+    return item->getInt64(attr, value);
+}
+
+bool mediametrics_getDouble(mediametrics_handle_t handle, attr_t attr,
+                                 double *value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return false;
+    return item->getDouble(attr, value);
+}
+
+bool mediametrics_getRate(mediametrics_handle_t handle, attr_t attr,
+                               int64_t * count, int64_t * duration, double *rate) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return false;
+    return item->getRate(attr, count, duration, rate);
+}
+
+// NB: the caller owns the string that comes back and is responsible for freeing it.
+bool mediametrics_getCString(mediametrics_handle_t handle, attr_t attr,
+                                 char **value) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return false;
+
+    return item->getCString(attr, value);
+}
+
+// to release strings returned via getCString()
+void mediametrics_freeCString(char *value) {
+    free(value);
+}
+
+bool mediametrics_selfRecord(mediametrics_handle_t handle) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return false;
+    return item->selfrecord();
+}
+
+
+const char *mediametrics_readable(mediametrics_handle_t handle) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return "";
+    return item->toCString();
+}
+
+int32_t mediametrics_count(mediametrics_handle_t handle) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return 0;
+    return item->count();
+}
+
+bool mediametrics_isEnabled() {
+    // static, so doesn't need an instance
+    return android::MediaAnalyticsItem::isEnabled();
+}
+
+#if 0
+// do not expose this as is.
+// need to revisit (or redefine) how the android::Parcel parameter is handled
+// so that it meets the stable-API criteria for updateable components.
+//
+int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel) {
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) {
+        return -1;
+    }
+    return item->writeToParcel(parcel);
+}
+#endif
+
+
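A short usage sketch of this C surface, showing the ownership rule stated in the header comment (strings handed back must be freed by the caller); the key and attribute names here are illustrative only:

    #include <cstdio>
    #include <cstdlib>

    #include <media/MediaMetrics.h>

    int main() {
        mediametrics_handle_t handle = mediametrics_create("example.codec");
        mediametrics_setInt32(handle, "width", 1920);
        mediametrics_setCString(handle, "mime", "video/avc");
        mediametrics_addInt32(handle, "frames", 25);  // acts as a set the first time

        char *mime = nullptr;
        if (mediametrics_getCString(handle, "mime", &mime)) {
            std::printf("mime=%s\n", mime);
            mediametrics_freeCString(mime);           // caller owns the copy
        }

        const char *dump = mediametrics_readable(handle);
        std::printf("%s\n", dump);
        std::free(const_cast<char *>(dump));          // also caller-owned

        mediametrics_delete(handle);
        return 0;
    }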
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 263cde7..b99cd91 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -134,6 +134,7 @@
         bool getRate(Attr, int64_t *count, int64_t *duration, double *rate);
         // Caller owns the returned string
         bool getCString(Attr, char **value);
+        bool getString(Attr, std::string *value);
 
         // parameter indicates whether to close any existing open
         // record with same key before establishing a new record
@@ -176,6 +177,8 @@
 
         std::string toString();
         std::string toString(int version);
+        const char *toCString();
+        const char *toCString(int version);
 
         // are we collecting analytics data
         static bool isEnabled();
diff --git a/media/libmediametrics/include/MediaMetrics.h b/media/libmediametrics/include/MediaMetrics.h
new file mode 100644
index 0000000..4d2f352
--- /dev/null
+++ b/media/libmediametrics/include/MediaMetrics.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_MEDIAMETRICS_H
+#define ANDROID_MEDIA_MEDIAMETRICS_H
+
+//
+// define a C interface to the media metrics functionality
+//
+// All functions that return a char * or const char * transfer ownership of that
+// string to the caller, who is responsible for calling free() on the pointer
+// when done using the value.
+
+__BEGIN_DECLS
+
+// the handle is internally re-cast to the behind-the-scenes C++ class instance
+typedef int64_t mediametrics_handle_t;
+typedef const char *mediametricskey_t;
+typedef const char *attr_t;
+
+mediametrics_handle_t mediametrics_create(mediametricskey_t key);
+void mediametrics_delete(mediametrics_handle_t handle);
+
+mediametricskey_t mediametrics_getKey(mediametrics_handle_t handle);
+
+
+// set
+void mediametrics_setInt32(mediametrics_handle_t handle, attr_t attr,
+                           int32_t value);
+void mediametrics_setInt64(mediametrics_handle_t handle, attr_t attr,
+                           int64_t value);
+void mediametrics_setDouble(mediametrics_handle_t handle, attr_t attr,
+                            double value);
+void mediametrics_setRate(mediametrics_handle_t handle, attr_t attr,
+                          int64_t count, int64_t duration);
+void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
+                            const char * value);
+
+// fused get/add/set; if attr wasn't there, it's a simple set.
+// these do not provide atomicity or mutual exclusion, only simpler code sequences.
+void mediametrics_addInt32(mediametrics_handle_t handle, attr_t attr,
+                           int32_t value);
+void mediametrics_addInt64(mediametrics_handle_t handle, attr_t attr,
+                           int64_t value);
+void mediametrics_addDouble(mediametrics_handle_t handle, attr_t attr,
+                            double value);
+void mediametrics_addRate(mediametrics_handle_t handle, attr_t attr,
+                          int64_t count, int64_t duration);
+
+// find & extract values
+// the return value indicates whether attr exists (and thus whether value was filled in)
+// a NULL value pointer suppresses storage of the value.
+bool mediametrics_getInt32(mediametrics_handle_t handle, attr_t attr,
+                           int32_t * value);
+bool mediametrics_getInt64(mediametrics_handle_t handle, attr_t attr,
+                           int64_t * value);
+bool mediametrics_getDouble(mediametrics_handle_t handle, attr_t attr,
+                            double *value);
+bool mediametrics_getRate(mediametrics_handle_t handle, attr_t attr,
+                          int64_t * count, int64_t * duration, double *rate);
+bool mediametrics_getCString(mediametrics_handle_t handle, attr_t attr,
+                            char **value);
+// to release strings returned via getCString()
+void mediametrics_freeCString(char *value);
+
+// # of attributes set within this record.
+int32_t mediametrics_count(mediametrics_handle_t handle);
+
+bool mediametrics_selfRecord(mediametrics_handle_t handle);
+
+const char *mediametrics_readable(mediametrics_handle_t handle);
+void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid);
+bool mediametrics_isEnabled();
+
+#if 0
+// do not expose this as is.
+// need to revisit (or redefine) how the android::Parcel parameter is handled
+// so that it meets the stable-API criteria for updateable components.
+//
+int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel);
+#endif
+
+__END_DECLS
+
+#endif  // ANDROID_MEDIA_MEDIAMETRICS_H
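A quick end-to-end sketch of the C interface declared above, to show how the pieces fit together (key and attribute names are illustrative only; error handling omitted):

    mediametrics_handle_t item = mediametrics_create("examplekey");
    mediametrics_setInt32(item, "width", 1920);     // plain set
    mediametrics_addInt64(item, "bytes", 4096);     // fused get/add/set
    if (mediametrics_isEnabled()) {
        mediametrics_selfRecord(item);              // hand the record to the metrics service
    }
    mediametrics_delete(item);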
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 2109ad1..6b43375 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -78,9 +78,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index 7c2191b..a01afa3 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -439,31 +439,16 @@
 
 size_t JAudioTrack::frameSize() {
     JNIEnv *env = JavaVMHelper::getJNIEnv();
-
-    // TODO: Calculated here implementing the logic in AudioTrack.java
-    // wait for AudioTrack.java exposing this parameter (i.e. getFrameSizeInBtytes())
-    jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
-    int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
+    jmethodID jGetFormat = env->GetMethodID(mAudioTrackCls,
+            "getFormat", "()Landroid/media/AudioFormat;");
+    jobject jAudioFormatObj = env->CallObjectMethod(mAudioTrackObj, jGetFormat);
 
     jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
-    jmethodID jIsEncodingLinearFrames = env->GetStaticMethodID(
-            jAudioFormatCls, "isEncodingLinearFrames", "(I)Z");
-    jboolean javaIsEncodingLinearFrames = env->CallStaticBooleanMethod(
-            jAudioFormatCls, jIsEncodingLinearFrames, javaFormat);
+    jmethodID jGetFrameSizeInBytes = env->GetMethodID(
+            jAudioFormatCls, "getFrameSizeInBytes", "()I");
+    jint javaFrameSizeInBytes = env->CallIntMethod(jAudioFormatObj, jGetFrameSizeInBytes);
 
-    if (javaIsEncodingLinearFrames == false) {
-        return 1;
-    }
-
-    jmethodID jGetBytesPerSample = env->GetStaticMethodID(jAudioFormatCls,
-            "getBytesPerSample", "(I)I");
-    int javaBytesPerSample = env->CallStaticIntMethod(jAudioFormatCls,
-            jGetBytesPerSample, javaFormat);
-
-    jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
-    int javaChannelCount = env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
-
-    return javaChannelCount * javaBytesPerSample;
+    return (size_t)javaFrameSizeInBytes;
 }
 
 status_t JAudioTrack::dump(int fd, const Vector<String16>& args __unused) const
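The new frameSize() path assumes AudioTrack.getFormat() returns a non-null AudioFormat and leaves no pending JNI exception. A hedged, illustrative variant with minimal defensive checks (the fallback value of 1 mirrors the old non-linear-PCM behavior and is an assumption, not part of this change):

    size_t JAudioTrack::frameSize() {
        JNIEnv *env = JavaVMHelper::getJNIEnv();
        jmethodID jGetFormat = env->GetMethodID(mAudioTrackCls,
                "getFormat", "()Landroid/media/AudioFormat;");
        jobject jAudioFormatObj = env->CallObjectMethod(mAudioTrackObj, jGetFormat);
        if (env->ExceptionCheck() || jAudioFormatObj == NULL) {
            env->ExceptionClear();
            return 1;  // conservative fallback (assumption)
        }
        jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
        jmethodID jGetFrameSizeInBytes = env->GetMethodID(
                jAudioFormatCls, "getFrameSizeInBytes", "()I");
        jint javaFrameSizeInBytes = env->CallIntMethod(jAudioFormatObj, jGetFrameSizeInBytes);
        return (size_t)javaFrameSizeInBytes;
    }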
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
index 4b19e38..5e98589 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
@@ -260,11 +260,11 @@
     virtual void onMessageReceived(const sp<AMessage> & /* msg */) override { }
 
     // Modular DRM
-    virtual status_t prepareDrm(const uint8_t /* uuid */[16],
+    virtual status_t prepareDrm(int64_t /*srcId*/, const uint8_t /* uuid */[16],
                                 const Vector<uint8_t>& /* drmSessionId */) {
         return INVALID_OPERATION;
     }
-    virtual status_t releaseDrm() {
+    virtual status_t releaseDrm(int64_t /*srcId*/) {
         return INVALID_OPERATION;
     }
 
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index 4bc1a4a..c7cd7d2 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -105,8 +105,10 @@
             status_t        getParameter(int key, Parcel* reply);
 
             // Modular DRM
-            status_t        prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
-            status_t        releaseDrm();
+            status_t        prepareDrm(int64_t srcId,
+                                       const uint8_t uuid[16],
+                                       const Vector<uint8_t>& drmSessionId);
+            status_t        releaseDrm(int64_t srcId);
             // AudioRouting
             status_t        setPreferredDevice(jobject device);
             jobject         getRoutedDevice();
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index 921a5b7..2ae5a8c 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -1094,7 +1094,8 @@
 }
 
 // Modular DRM
-status_t MediaPlayer2::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId) {
+status_t MediaPlayer2::prepareDrm(
+        int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId) {
     // TODO change to ALOGV
     ALOGD("prepareDrm: uuid: %p  drmSessionId: %p(%zu)", uuid,
             drmSessionId.array(), drmSessionId.size());
@@ -1118,7 +1119,7 @@
     }
 
     // Passing down to mediaserver mainly for creating the crypto
-    status_t status = mPlayer->prepareDrm(uuid, drmSessionId);
+    status_t status = mPlayer->prepareDrm(srcId, uuid, drmSessionId);
     ALOGE_IF(status != OK, "prepareDrm: Failed at mediaserver with ret: %d", status);
 
     // TODO change to ALOGV
@@ -1127,7 +1128,7 @@
     return status;
 }
 
-status_t MediaPlayer2::releaseDrm() {
+status_t MediaPlayer2::releaseDrm(int64_t srcId) {
     Mutex::Autolock _l(mLock);
     if (mPlayer == NULL) {
         return NO_INIT;
@@ -1142,7 +1143,7 @@
         return INVALID_OPERATION;
     }
 
-    status_t status = mPlayer->releaseDrm();
+    status_t status = mPlayer->releaseDrm(srcId);
     // TODO change to ALOGV
     ALOGD("releaseDrm: mediaserver::releaseDrm ret: %d", status);
     if (status != OK) {
diff --git a/media/libmediaplayer2/nuplayer2/Android.bp b/media/libmediaplayer2/nuplayer2/Android.bp
index 93c218e..71cd50f 100644
--- a/media/libmediaplayer2/nuplayer2/Android.bp
+++ b/media/libmediaplayer2/nuplayer2/Android.bp
@@ -66,9 +66,6 @@
 
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
index 1860b0c..f01361b 100644
--- a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
@@ -322,7 +322,7 @@
 
     if (mLooper == NULL) {
         mLooper = new ALooper;
-        mLooper->setName("generic");
+        mLooper->setName("generic2");
         mLooper->start(false, /* runOnCallingThread */
                        true,  /* canCallJava */
                        PRIORITY_DEFAULT);
diff --git a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
index 175be53..e53900b 100644
--- a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
@@ -103,7 +103,7 @@
 void NuPlayer2::HTTPLiveSource2::prepareAsync(int64_t /* startTimeUs */) {
     if (mLiveLooper == NULL) {
         mLiveLooper = new ALooper;
-        mLiveLooper->setName("http live");
+        mLiveLooper->setName("http live2");
         mLiveLooper->start(false, /* runOnCallingThread */
                            true /* canCallJava */);
 
@@ -273,10 +273,10 @@
 
         if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
             notify->post();
-            msg->post(delayUs > 0ll ? delayUs : 0ll);
+            msg->post(delayUs > 0LL ? delayUs : 0LL);
             return;
         } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
-            if (delayUs < -1000000ll) { // 1 second
+            if (delayUs < -1000000LL) { // 1 second
                 continue;
             }
             notify->post();
@@ -288,7 +288,7 @@
     }
 
     // try again in 1 second
-    msg->post(1000000ll);
+    msg->post(1000000LL);
 }
 
 void NuPlayer2::HTTPLiveSource2::onMessageReceived(const sp<AMessage> &msg) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 5ea600f..81ffbc7 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -240,8 +240,7 @@
       mVideoDecoderError(false),
       mPaused(false),
       mPausedByClient(true),
-      mPausedForBuffering(false),
-      mIsDrmProtected(false) {
+      mPausedForBuffering(false) {
     CHECK(mediaClock != NULL);
     clearFlushComplete();
 }
@@ -825,20 +824,31 @@
 
         case kWhatGetSelectedTrack:
         {
+            int32_t type32;
+            CHECK(msg->findInt32("type", (int32_t*)&type32));
+            media_track_type type = (media_track_type)type32;
+
+            size_t inbandTracks = 0;
             status_t err = INVALID_OPERATION;
+            ssize_t selectedTrack = -1;
             if (mCurrentSourceInfo.mSource != NULL) {
                 err = OK;
-
-                int32_t type32;
-                CHECK(msg->findInt32("type", (int32_t*)&type32));
-                media_track_type type = (media_track_type)type32;
-                ssize_t selectedTrack = mCurrentSourceInfo.mSource->getSelectedTrack(type);
-
-                PlayerMessage* reply;
-                CHECK(msg->findPointer("reply", (void**)&reply));
-                reply->add_values()->set_int32_value(selectedTrack);
+                inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
+                selectedTrack = mCurrentSourceInfo.mSource->getSelectedTrack(type);
             }
 
+            if (selectedTrack == -1 && mCCDecoder != NULL) {
+                err = OK;
+                selectedTrack = mCCDecoder->getSelectedTrack(type);
+                if (selectedTrack != -1) {
+                    selectedTrack += inbandTracks;
+                }
+            }
+
+            PlayerMessage* reply;
+            CHECK(msg->findPointer("reply", (void**)&reply));
+            reply->add_values()->set_int32_value(selectedTrack);
+
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
 
@@ -916,7 +926,7 @@
                 }
             }
 
-            msg->post(1000000ll);  // poll again in a second.
+            msg->post(1000000LL);  // poll again in a second.
             break;
         }
 
@@ -1184,7 +1194,7 @@
             }
 
             if (rescan) {
-                msg->post(100000ll);
+                msg->post(100000LL);
                 mScanSourcesPending = true;
             }
             break;
@@ -1619,7 +1629,7 @@
 
         case kWhatReleaseDrm:
         {
-            status_t status = onReleaseDrm();
+            status_t status = onReleaseDrm(msg);
 
             sp<AMessage> response = new AMessage;
             response->setInt32("status", status);
@@ -1661,7 +1671,7 @@
 }
 
 void NuPlayer2::onStart(bool play) {
-    ALOGV("onStart: mCrypto: %p", mCrypto.get());
+    ALOGV("onStart: mCrypto: %p", mCurrentSourceInfo.mCrypto.get());
 
     if (!mSourceStarted) {
         mSourceStarted = true;
@@ -1705,7 +1715,7 @@
                 && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
 
     // Modular DRM: Disabling audio offload if the source is protected
-    if (mOffloadAudio && mIsDrmProtected) {
+    if (mOffloadAudio && mCurrentSourceInfo.mIsDrmProtected) {
         mOffloadAudio = false;
         ALOGV("onStart: Disabling mOffloadAudio now that the source is protected.");
     }
@@ -1719,7 +1729,7 @@
     notify->setInt32("generation", mRendererGeneration);
     mRenderer = new Renderer(mAudioSink, mMediaClock, notify, flags);
     mRendererLooper = new ALooper;
-    mRendererLooper->setName("NuPlayerRenderer");
+    mRendererLooper->setName("NuPlayer2Renderer");
     mRendererLooper->start(false, true, ANDROID_PRIORITY_AUDIO);
     mRendererLooper->registerHandler(mRenderer);
 
@@ -1999,7 +2009,7 @@
                     && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
 
     // Modular DRM: Disabling audio offload if the source is protected
-    if (canOffload && mIsDrmProtected) {
+    if (canOffload && mCurrentSourceInfo.mIsDrmProtected) {
         canOffload = false;
         ALOGV("determineAudioModeChange: Disabling mOffloadAudio b/c the source is protected.");
     }
@@ -2106,10 +2116,11 @@
     (*decoder)->init();
 
     // Modular DRM
-    if (mIsDrmProtected) {
-        format->setObject("crypto", mCrypto);
+    if (mCurrentSourceInfo.mIsDrmProtected) {
+        format->setObject("crypto", mCurrentSourceInfo.mCrypto);
         ALOGV("instantiateDecoder: mCrypto: %p isSecure: %d",
-                mCrypto.get(), (mCurrentSourceInfo.mSourceFlags & Source::FLAG_SECURE) != 0);
+                mCurrentSourceInfo.mCrypto.get(),
+                (mCurrentSourceInfo.mSourceFlags & Source::FLAG_SECURE) != 0);
     }
 
     (*decoder)->configure(format);
@@ -2495,12 +2506,8 @@
     mRenderer.clear();
     ++mRendererGeneration;
 
-    if (mCurrentSourceInfo.mSource != NULL) {
-        mCurrentSourceInfo.mSource->stop();
-
-        Mutex::Autolock autoLock(mSourceLock);
-        mCurrentSourceInfo.mSource.clear();
-    }
+    resetSourceInfo(mCurrentSourceInfo);
+    resetSourceInfo(mNextSourceInfo);
 
     if (mDriver != NULL) {
         sp<NuPlayer2Driver> driver = mDriver.promote();
@@ -2514,14 +2521,6 @@
     mResetting = false;
     mSourceStarted = false;
 
-    // Modular DRM
-    if (mCrypto != NULL) {
-        // decoders will be flushed before this so their mCrypto would go away on their own
-        // TODO change to ALOGV
-        ALOGD("performReset mCrypto: %p", mCrypto.get());
-        mCrypto.clear();
-    }
-    mIsDrmProtected = false;
 }
 
 void NuPlayer2::performPlayNextDataSource() {
@@ -2575,15 +2574,6 @@
 
     addEndTimeMonitor();
 
-    // Modular DRM
-    if (mCrypto != NULL) {
-        // decoders will be flushed before this so their mCrypto would go away on their own
-        // TODO change to ALOGV
-        ALOGD("performReset mCrypto: %p", mCrypto.get());
-        mCrypto.clear();
-    }
-    mIsDrmProtected = false;
-
     if (mRenderer != NULL) {
         mRenderer->resume();
     }
@@ -2880,7 +2870,7 @@
             int64_t posMs;
             int64_t timeUs, posUs;
             driver->getCurrentPosition(&posMs);
-            posUs = posMs * 1000ll;
+            posUs = posMs * 1000LL;
             CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
             if (posUs < timeUs) {
@@ -3034,8 +3024,34 @@
     }
  }
 
+NuPlayer2::SourceInfo* NuPlayer2::getSourceInfoByIdInMsg(const sp<AMessage> &msg) {
+    int64_t srcId;
+    CHECK(msg->findInt64("srcId", &srcId));
+    if (mCurrentSourceInfo.mSrcId == srcId) {
+        return &mCurrentSourceInfo;
+    } else if (mNextSourceInfo.mSrcId == srcId) {
+        return &mNextSourceInfo;
+    } else {
+        return NULL;
+    }
+}
+
+void NuPlayer2::resetSourceInfo(NuPlayer2::SourceInfo &srcInfo) {
+    if (srcInfo.mSource != NULL) {
+        srcInfo.mSource->stop();
+
+        Mutex::Autolock autoLock(mSourceLock);
+        srcInfo.mSource.clear();
+    }
+    // Modular DRM
+    ALOGD("performReset mCrypto: %p", srcInfo.mCrypto.get());
+    srcInfo.mCrypto.clear();
+    srcInfo.mIsDrmProtected = false;
+}
+
 // Modular DRM begin
-status_t NuPlayer2::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+status_t NuPlayer2::prepareDrm(
+        int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
 {
     ALOGV("prepareDrm ");
 
@@ -3045,6 +3061,7 @@
     uint8_t UUID[16];
     memcpy(UUID, uuid, sizeof(UUID));
     Vector<uint8_t> sessionId = drmSessionId;
+    msg->setInt64("srcId", srcId);
     msg->setPointer("uuid", (void*)UUID);
     msg->setPointer("drmSessionId", (void*)&sessionId);
 
@@ -3061,11 +3078,12 @@
     return status;
 }
 
-status_t NuPlayer2::releaseDrm()
+status_t NuPlayer2::releaseDrm(int64_t srcId)
 {
     ALOGV("releaseDrm ");
 
     sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
+    msg->setInt64("srcId", srcId);
 
     sp<AMessage> response;
     status_t status = msg->postAndAwaitResponse(&response);
@@ -3086,8 +3104,15 @@
     ALOGD("onPrepareDrm ");
 
     status_t status = INVALID_OPERATION;
-    if (mCurrentSourceInfo.mSource == NULL) {
-        ALOGE("onPrepareDrm: No source. onPrepareDrm failed with %d.", status);
+    SourceInfo *srcInfo = getSourceInfoByIdInMsg(msg);
+    if (srcInfo == NULL) {
+        return status;
+    }
+
+    int64_t srcId = srcInfo->mSrcId;
+    if (srcInfo->mSource == NULL) {
+        ALOGE("onPrepareDrm: srcInfo(%lld) No source. onPrepareDrm failed with %d.",
+                (long long)srcId, status);
         return status;
     }
 
@@ -3099,42 +3124,50 @@
     status = OK;
     sp<AMediaCryptoWrapper> crypto = NULL;
 
-    status = mCurrentSourceInfo.mSource->prepareDrm(uuid, *drmSessionId, &crypto);
+    status = srcInfo->mSource->prepareDrm(uuid, *drmSessionId, &crypto);
     if (crypto == NULL) {
-        ALOGE("onPrepareDrm: mCurrentSourceInfo.mSource->prepareDrm failed. status: %d", status);
+        ALOGE("onPrepareDrm: srcInfo(%lld).mSource->prepareDrm failed. status: %d",
+                (long long)srcId, status);
         return status;
     }
-    ALOGV("onPrepareDrm: mCurrentSourceInfo.mSource->prepareDrm succeeded");
+    ALOGV("onPrepareDrm: srcInfo(%lld).mSource->prepareDrm succeeded", (long long)srcId);
 
-    if (mCrypto != NULL) {
-        ALOGE("onPrepareDrm: Unexpected. Already having mCrypto: %p", mCrypto.get());
-        mCrypto.clear();
+    if (srcInfo->mCrypto != NULL) {
+        ALOGE("onPrepareDrm: srcInfo(%lld) Unexpected. Already having mCrypto: %p",
+                (long long)srcId, srcInfo->mCrypto.get());
+        srcInfo->mCrypto.clear();
     }
 
-    mCrypto = crypto;
-    mIsDrmProtected = true;
+    srcInfo->mCrypto = crypto;
+    srcInfo->mIsDrmProtected = true;
     // TODO change to ALOGV
-    ALOGD("onPrepareDrm: mCrypto: %p", mCrypto.get());
+    ALOGD("onPrepareDrm: mCrypto: %p", srcInfo->mCrypto.get());
 
     return status;
 }
 
-status_t NuPlayer2::onReleaseDrm()
+status_t NuPlayer2::onReleaseDrm(const sp<AMessage> &msg)
 {
     // TODO change to ALOGV
     ALOGD("onReleaseDrm ");
-
-    if (!mIsDrmProtected) {
-        ALOGW("onReleaseDrm: Unexpected. mIsDrmProtected is already false.");
+    SourceInfo *srcInfo = getSourceInfoByIdInMsg(msg);
+    if (srcInfo == NULL) {
+        return INVALID_OPERATION;
     }
 
-    mIsDrmProtected = false;
+    int64_t srcId = srcInfo->mSrcId;
+    if (!srcInfo->mIsDrmProtected) {
+        ALOGW("onReleaseDrm: srcInfo(%lld) Unexpected. mIsDrmProtected is already false.",
+                (long long)srcId);
+    }
+
+    srcInfo->mIsDrmProtected = false;
 
     status_t status;
-    if (mCrypto != NULL) {
+    if (srcInfo->mCrypto != NULL) {
         // notifying the source first before removing crypto from codec
-        if (mCurrentSourceInfo.mSource != NULL) {
-            mCurrentSourceInfo.mSource->releaseDrm();
+        if (srcInfo->mSource != NULL) {
+            srcInfo->mSource->releaseDrm();
         }
 
         status=OK;
@@ -3155,9 +3188,9 @@
         }
 
         // TODO change to ALOGV
-        ALOGD("onReleaseDrm: mCrypto: %p", mCrypto.get());
-        mCrypto.clear();
-    } else {   // mCrypto == NULL
+        ALOGD("onReleaseDrm: mCrypto: %p", srcInfo->mCrypto.get());
+        srcInfo->mCrypto.clear();
+    } else {   // srcInfo->mCrypto == NULL
         ALOGE("onReleaseDrm: Unexpected. There is no crypto.");
         status = INVALID_OPERATION;
     }
@@ -3229,11 +3262,13 @@
 
 NuPlayer2::SourceInfo & NuPlayer2::SourceInfo::operator=(const NuPlayer2::SourceInfo &other) {
     mSource = other.mSource;
+    mCrypto = other.mCrypto;
     mDataSourceType = (DATA_SOURCE_TYPE)other.mDataSourceType;
     mSrcId = other.mSrcId;
     mSourceFlags = other.mSourceFlags;
     mStartTimeUs = other.mStartTimeUs;
     mEndTimeUs = other.mEndTimeUs;
+    mIsDrmProtected = other.mIsDrmProtected;
     return *this;
 }
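The srcId plumbing added above keeps Modular DRM state (mCrypto, mIsDrmProtected) attached to the SourceInfo it belongs to rather than to the player as a whole. A minimal sketch of the routing, using only pieces introduced in this file (illustrative, error handling trimmed):

    // Caller side: tag the request with the source it targets.
    sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
    msg->setInt64("srcId", mCurrentSourceInfo.mSrcId);   // could equally be mNextSourceInfo
    // Handler side (onReleaseDrm): resolve the id back to a SourceInfo.
    SourceInfo *srcInfo = getSourceInfoByIdInMsg(msg);   // NULL if the id matches neither source
    if (srcInfo != NULL && srcInfo->mCrypto != NULL) {
        srcInfo->mSource->releaseDrm();
        srcInfo->mCrypto.clear();
        srcInfo->mIsDrmProtected = false;
    }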
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
index 3ecdb01..e9b5f11 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
@@ -92,8 +92,8 @@
     float getFrameRate();
 
     // Modular DRM
-    status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
-    status_t releaseDrm();
+    status_t prepareDrm(int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+    status_t releaseDrm(int64_t srcId);
 
     const char *getDataSourceType();
 
@@ -178,6 +178,9 @@
         uint32_t mSourceFlags;
         int64_t mStartTimeUs;
         int64_t mEndTimeUs;
+        // Modular DRM
+        sp<AMediaCryptoWrapper> mCrypto;
+        bool mIsDrmProtected = false;
     };
 
     wp<NuPlayer2Driver> mDriver;
@@ -269,10 +272,6 @@
     // Pause state as requested by source (internally) due to buffering
     bool mPausedForBuffering;
 
-    // Modular DRM
-    sp<AMediaCryptoWrapper> mCrypto;
-    bool mIsDrmProtected;
-
     inline const sp<DecoderBase> &getDecoder(bool audio) {
         return audio ? mAudioDecoder : mVideoDecoder;
     }
@@ -351,7 +350,10 @@
     void writeTrackInfo(PlayerMessage* reply, const sp<AMessage>& format) const;
 
     status_t onPrepareDrm(const sp<AMessage> &msg);
-    status_t onReleaseDrm();
+    status_t onReleaseDrm(const sp<AMessage> &msg);
+
+    SourceInfo* getSourceInfoByIdInMsg(const sp<AMessage> &msg);
+    void resetSourceInfo(SourceInfo &srcInfo);
 
     DISALLOW_EVIL_CONSTRUCTORS(NuPlayer2);
 };
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
index e215965..98c3403 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
@@ -199,6 +199,18 @@
     return OK;
 }
 
+ssize_t NuPlayer2::CCDecoder::getSelectedTrack(media_track_type type) const {
+    if (mSelectedTrack != -1) {
+        CCTrack track = mTracks[mSelectedTrack];
+        if (track.mTrackType == kTrackTypeCEA608 || track.mTrackType == kTrackTypeCEA708) {
+            return (type == MEDIA_TRACK_TYPE_SUBTITLE ? mSelectedTrack : -1);
+        }
+        return (type == MEDIA_TRACK_TYPE_UNKNOWN ? mSelectedTrack : -1);
+    }
+
+    return -1;
+}
+
 bool NuPlayer2::CCDecoder::isSelected() const {
     return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)getTrackCount();
 }
@@ -555,7 +567,7 @@
 
         ccBuf->meta()->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, mSelectedTrack);
         ccBuf->meta()->setInt64("timeUs", timeUs);
-        ccBuf->meta()->setInt64("durationUs", 0ll);
+        ccBuf->meta()->setInt64("durationUs", 0LL);
 
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatClosedCaptionData);
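The getSelectedTrack() method added above only answers for the CC decoder's own track list; NuPlayer2's kWhatGetSelectedTrack handler then maps that into the index space the app already sees from getTrackInfo(), with in-band tracks first and CC tracks appended. A tiny worked example (numbers are hypothetical):

    size_t inbandTracks = 3;                           // mSource->getTrackCount()
    ssize_t ccSelected  = 1;                           // mCCDecoder->getSelectedTrack(type)
    ssize_t reported    = ccSelected + inbandTracks;   // app-visible selected index == 4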
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
index 57d5ea2..97834d1 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
@@ -38,6 +38,7 @@
     size_t getTrackCount() const;
     sp<AMessage> getTrackInfo(size_t index) const;
     status_t selectTrack(size_t index, bool select);
+    ssize_t getSelectedTrack(media_track_type type) const;
     bool isSelected() const;
     void decode(const sp<ABuffer> &accessUnit);
     void display(int64_t timeUs);
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 931b86e..49e3e3b 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -71,10 +71,10 @@
       mCCDecoder(ccDecoder),
       mPid(pid),
       mUid(uid),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
-      mNumFramesTotal(0ll),
-      mNumInputFramesDropped(0ll),
-      mNumOutputFramesDropped(0ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
+      mNumFramesTotal(0LL),
+      mNumInputFramesDropped(0LL),
+      mNumOutputFramesDropped(0LL),
       mVideoWidth(0),
       mVideoHeight(0),
       mIsAudio(true),
@@ -428,10 +428,10 @@
         // TODO: For now, layer fps is calculated for some specific architectures.
         // But it really should be extracted from the stream.
         mVideoTemporalLayerAggregateFps[0] =
-            mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+            mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - 1));
         for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
             mVideoTemporalLayerAggregateFps[i] =
-                mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+                mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - i))
                 + mVideoTemporalLayerAggregateFps[i - 1];
         }
     }
@@ -952,7 +952,7 @@
 
             int32_t layerId = 0;
             bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
-            if (mRenderer->getVideoLateByUs() > 100000ll
+            if (mRenderer->getVideoLateByUs() > 100000LL
                     && mIsVideoAVC
                     && !IsAVCReferenceFrame(accessUnit)) {
                 dropAccessUnit = true;
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
index 9c1988f..914f29f 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
@@ -38,7 +38,7 @@
     // Every decoder has its own looper because MediaCodec operations
     // are blocking, but NuPlayer2 needs asynchronous operations.
     mDecoderLooper = new ALooper;
-    mDecoderLooper->setName("NPDecoder");
+    mDecoderLooper->setName("NP2Decoder");
     mDecoderLooper->start(false, /* runOnCallingThread */
                           true,  /* canCallJava */
                           ANDROID_PRIORITY_AUDIO);
@@ -122,7 +122,7 @@
         mRequestInputBuffersPending = true;
 
         sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
-        msg->post(10 * 1000ll);
+        msg->post(10 * 1000LL);
     }
 }
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
index 0e0c1d8..0514e88 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
@@ -46,7 +46,7 @@
     : DecoderBase(notify),
       mSource(source),
       mRenderer(renderer),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
       mReachedEOS(true),
       mPendingAudioErr(OK),
       mPendingBuffersToDrain(0),
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 821dc9f..56d708a 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -372,7 +372,7 @@
     ALOGD("seekTo(%p) (%lld ms, %d) at state %d", this, (long long)msec, mode, mState);
     Mutex::Autolock autoLock(mLock);
 
-    int64_t seekTimeUs = msec * 1000ll;
+    int64_t seekTimeUs = msec * 1000LL;
 
     switch (mState) {
         case STATE_PREPARED:
@@ -426,7 +426,7 @@
         return UNKNOWN_ERROR;
     }
 
-    *msec = (mDurationUs + 500ll) / 1000;
+    *msec = (mDurationUs + 500LL) / 1000;
 
     return OK;
 }
@@ -612,7 +612,7 @@
             int64_t msec = 0;
             // getCurrentPosition should always return OK
             getCurrentPosition(&msec);
-            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000ll);
+            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000LL);
         }
 
         case MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK:
@@ -976,24 +976,25 @@
 }
 
 // Modular DRM
-status_t NuPlayer2Driver::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+status_t NuPlayer2Driver::prepareDrm(
+        int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
 {
     ALOGV("prepareDrm(%p) state: %d", this, mState);
 
     // leaving the state verification for mediaplayer.cpp
-    status_t ret = mPlayer->prepareDrm(uuid, drmSessionId);
+    status_t ret = mPlayer->prepareDrm(srcId, uuid, drmSessionId);
 
     ALOGV("prepareDrm ret: %d", ret);
 
     return ret;
 }
 
-status_t NuPlayer2Driver::releaseDrm()
+status_t NuPlayer2Driver::releaseDrm(int64_t srcId)
 {
     ALOGV("releaseDrm(%p) state: %d", this, mState);
 
     // leaving the state verification for mediaplayer.cpp
-    status_t ret = mPlayer->releaseDrm();
+    status_t ret = mPlayer->releaseDrm(srcId);
 
     ALOGV("releaseDrm ret: %d", ret);
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
index 50ee173..bb30c76 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
@@ -82,8 +82,9 @@
     void notifyFlagsChanged(int64_t srcId, uint32_t flags);
 
     // Modular DRM
-    virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
-    virtual status_t releaseDrm();
+    virtual status_t prepareDrm(
+            int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+    virtual status_t releaseDrm(int64_t srcId);
 
 protected:
     virtual ~NuPlayer2Driver();
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index d800412..9d9e179 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -67,10 +67,10 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 10000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000LL;
 
 // Maximum allowed delay from AudioSink, 1.5 seconds.
-static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
 
 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
 
@@ -84,7 +84,7 @@
 };
 
 // static
-const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000ll;
+const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
 
 NuPlayer2::Renderer::Renderer(
         const sp<MediaPlayer2Interface::AudioSink> &sink,
@@ -108,7 +108,7 @@
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorNumFramesWritten(-1),
-      mVideoLateByUs(0ll),
+      mVideoLateByUs(0LL),
       mNextVideoTimeMediaUs(-1),
       mHasAudio(false),
       mHasVideo(false),
@@ -1142,7 +1142,7 @@
         int64_t nowUs = ALooper::GetNowUs();
         int64_t mediaUs;
         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
-            return 0ll;
+            return 0LL;
         } else {
             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
         }
@@ -1357,7 +1357,7 @@
         tooLate = false;
     }
 
-    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
+    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
     entry->mNotifyConsumed->setInt32("render", !tooLate);
     entry->mNotifyConsumed->post();
     mVideoQueue.erase(mVideoQueue.begin());
@@ -1489,7 +1489,7 @@
 
     ALOGV("queueDiff = %.2f secs", diff / 1E6);
 
-    if (diff > 100000ll) {
+    if (diff > 100000LL) {
         // Audio data starts More than 0.1 secs before video.
         // Drop some audio.
 
diff --git a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
index 53c66c0..a70269e 100644
--- a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
@@ -30,7 +30,7 @@
 
 namespace android {
 
-const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+const int64_t kNearEOSTimeoutUs = 2000000LL; // 2 secs
 
 // Default Buffer Underflow/Prepare/StartServer/Overflow Marks
 static const int kUnderflowMarkMs   =  1000;  // 1 second
@@ -105,7 +105,7 @@
 
     if (mLooper == NULL) {
         mLooper = new ALooper;
-        mLooper->setName("rtsp");
+        mLooper->setName("rtsp2");
         mLooper->start();
 
         mLooper->registerHandler(this);
@@ -168,7 +168,7 @@
     // We're going to buffer at least 2 secs worth data on all tracks before
     // starting playback (both at startup and after a seek).
 
-    static const int64_t kMinDurationUs = 2000000ll;
+    static const int64_t kMinDurationUs = 2000000LL;
 
     int64_t mediaDurationUs = 0;
     getDuration(&mediaDurationUs);
@@ -272,7 +272,7 @@
 }
 
 status_t NuPlayer2::RTSPSource2::getDuration(int64_t *durationUs) {
-    *durationUs = -1ll;
+    *durationUs = -1LL;
 
     int64_t audioDurationUs;
     if (mAudioTrack != NULL
@@ -321,7 +321,7 @@
 
 void NuPlayer2::RTSPSource2::schedulePollBuffering() {
     sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
-    msg->post(1000000ll); // 1 second intervals
+    msg->post(1000000LL); // 1 second intervals
 }
 
 void NuPlayer2::RTSPSource2::checkBuffering(
@@ -345,10 +345,10 @@
         int64_t maxRebufferingMarkUs;
         {
             Mutex::Autolock _l(mBufferingSettingsLock);
-            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000ll;
+            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000LL;
             // TODO: maxRebufferingMarkUs could be larger than
             // mBufferingSettings.mResumePlaybackMarkMs * 1000ll.
-            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000ll;
+            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000LL;
         }
         // isFinished when duration is 0 checks for EOS result only
         if (bufferedDurationUs > initialMarkUs
@@ -368,7 +368,7 @@
                 ++overflowCount;
             }
             int64_t startServerMarkUs =
-                    (kUnderflowMarkMs * 1000ll + maxRebufferingMarkUs) / 2;
+                    (kUnderflowMarkMs * 1000LL + maxRebufferingMarkUs) / 2;
             if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
@@ -639,7 +639,7 @@
                 int64_t nptUs =
                     ((double)rtpTime - (double)info->mRTPTime)
                         / info->mTimeScale
-                        * 1000000ll
+                        * 1000000LL
                         + info->mNormalPlaytimeUs;
 
                 accessUnit->meta()->setInt64("timeUs", nptUs);
@@ -747,7 +747,7 @@
         TrackInfo info;
         info.mTimeScale = timeScale;
         info.mRTPTime = 0;
-        info.mNormalPlaytimeUs = 0ll;
+        info.mNormalPlaytimeUs = 0LL;
         info.mNPTMappingValid = false;
 
         if ((isAudio && mAudioTrack == NULL)
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 09b19d7..51879fd 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -68,9 +68,6 @@
 
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e3ae02e..eae52c2 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -46,6 +46,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/OggWriter.h>
 #include <media/stagefright/PersistentSurface.h>
 #include <media/MediaProfiles.h>
 #include <camera/CameraParameters.h>
@@ -948,6 +949,10 @@
             status = setupMPEG2TSRecording();
             break;
 
+        case OUTPUT_FORMAT_OGG:
+            status = setupOggRecording();
+            break;
+
         default:
             ALOGE("Unsupported output file format: %d", mOutputFormat);
             status = UNKNOWN_ERROR;
@@ -1013,6 +1018,7 @@
         case OUTPUT_FORMAT_AAC_ADTS:
         case OUTPUT_FORMAT_RTP_AVP:
         case OUTPUT_FORMAT_MPEG2TS:
+        case OUTPUT_FORMAT_OGG:
         {
             sp<MetaData> meta = new MetaData;
             int64_t startTimeUs = systemTime() / 1000;
@@ -1113,6 +1119,9 @@
             format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
             format->setInt32("aac-profile", OMX_AUDIO_AACObjectELD);
             break;
+        case AUDIO_ENCODER_OPUS:
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_OPUS);
+            break;
 
         default:
             ALOGE("Unknown audio encoder: %d", mAudioEncoder);
@@ -1169,6 +1178,13 @@
     return setupRawAudioRecording();
 }
 
+status_t StagefrightRecorder::setupOggRecording() {
+    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_OGG);
+
+    mWriter = new OggWriter(mOutputFd);
+    return setupRawAudioRecording();
+}
+
 status_t StagefrightRecorder::setupAMRRecording() {
     CHECK(mOutputFormat == OUTPUT_FORMAT_AMR_NB ||
           mOutputFormat == OUTPUT_FORMAT_AMR_WB);
@@ -1813,6 +1829,7 @@
         case AUDIO_ENCODER_AAC:
         case AUDIO_ENCODER_HE_AAC:
         case AUDIO_ENCODER_AAC_ELD:
+        case AUDIO_ENCODER_OPUS:
             break;
 
         default:
@@ -1863,19 +1880,18 @@
         mTotalBitRate += mVideoBitRate;
     }
 
-    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
-        // Audio source is added at the end if it exists.
-        // This help make sure that the "recoding" sound is suppressed for
-        // camcorder applications in the recorded files.
-        // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
-        // disable audio for time lapse recording
-        bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
-        if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
-            err = setupAudioEncoder(writer);
-            if (err != OK) return err;
-            mTotalBitRate += mAudioBitRate;
-        }
+    // Audio source is added at the end if it exists.
+    // This helps make sure that the "recording" sound is suppressed for
+    // camcorder applications in the recorded files.
+    // disable audio for time lapse recording
+    const bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
+    if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
+        err = setupAudioEncoder(writer);
+        if (err != OK) return err;
+        mTotalBitRate += mAudioBitRate;
+    }
 
+    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
         if (mCaptureFpsEnable) {
             mp4writer->setCaptureRate(mCaptureFps);
         }
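With the Ogg/Opus hunks above in place, an Opus-in-Ogg audio recording can be configured through the existing recorder path. A hedged sketch of the call sequence, assuming 'recorder' is a StagefrightRecorder* created the way MediaRecorderClient normally creates one and 'fd' is an open, writable file descriptor (error checking omitted):

    recorder->setAudioSource(AUDIO_SOURCE_MIC);
    recorder->setOutputFormat(OUTPUT_FORMAT_OGG);    // dispatched to setupOggRecording()
    recorder->setAudioEncoder(AUDIO_ENCODER_OPUS);   // handled by the new encoder case
    recorder->setOutputFile(fd);
    recorder->prepare();
    recorder->start();
    // ... later ...
    recorder->stop();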
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index faa2e59..2ada301 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -166,6 +166,7 @@
     void setupMPEG4orWEBMMetaData(sp<MetaData> *meta);
     status_t setupAMRRecording();
     status_t setupAACRecording();
+    status_t setupOggRecording();
     status_t setupRawAudioRecording();
     status_t setupRTPRecording();
     status_t setupMPEG2TSRecording();
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
index a4da564..23a19e7 100644
--- a/media/libmediaplayerservice/nuplayer/Android.bp
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -56,9 +56,6 @@
 
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index f3b69d6..e2aa8f8 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -68,7 +68,7 @@
       mVideoDataGeneration(0),
       mFetchSubtitleDataGeneration(0),
       mFetchTimedTextDataGeneration(0),
-      mDurationUs(-1ll),
+      mDurationUs(-1LL),
       mAudioIsVorbis(false),
       mIsSecure(false),
       mIsStreaming(false),
@@ -76,7 +76,7 @@
       mUID(uid),
       mMediaClock(mediaClock),
       mFd(-1),
-      mBitrate(-1ll),
+      mBitrate(-1LL),
       mPendingReadBufferTypes(0) {
     ALOGV("GenericSource");
     CHECK(mediaClock != NULL);
@@ -727,7 +727,7 @@
     }
 
     if (msg->what() == kWhatFetchSubtitleData) {
+        subTimeUs -= 1000000LL;  // send subtitle data one second earlier
+        subTimeUs -= 1000000LL;  // send subtile data one second earlier
     }
     sp<AMessage> msg2 = new AMessage(sendWhat, this);
     msg2->setInt32("generation", msgGeneration);
@@ -764,7 +764,7 @@
         notify->post();
 
         if (msg->what() == kWhatSendSubtitleData) {
+            nextSubTimeUs -= 1000000LL;  // send subtitle data one second earlier
+            nextSubTimeUs -= 1000000LL;  // send subtile data one second earlier
         }
         mMediaClock->addTimer(msg, nextSubTimeUs);
     }
@@ -855,7 +855,7 @@
         // TODO: maxRebufferingMarkMs could be larger than
         // mBufferingSettings.mResumePlaybackMarkMs
         int64_t restartBufferingMarkUs =
-             mBufferingSettings.mResumePlaybackMarkMs * 1000ll / 2;
+             mBufferingSettings.mResumePlaybackMarkMs * 1000LL / 2;
         if (finalResult == OK) {
             if (durationUs < restartBufferingMarkUs) {
                 postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
@@ -1446,7 +1446,7 @@
         // TODO: maxRebufferingMarkMs could be larger than
         // mBufferingSettings.mResumePlaybackMarkMs
         int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
-            : mBufferingSettings.mResumePlaybackMarkMs) * 1000ll;
+            : mBufferingSettings.mResumePlaybackMarkMs) * 1000LL;
         if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
             if (mPreparing || mSentPauseOnBuffering) {
                 Track *counterTrack =
@@ -1514,12 +1514,12 @@
     sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
     msg->setInt32("generation", mPollBufferingGeneration);
     // Enquires buffering status every second.
-    msg->post(1000000ll);
+    msg->post(1000000LL);
 }
 
 void NuPlayer::GenericSource::onPollBuffering() {
     status_t finalStatus = UNKNOWN_ERROR;
-    int64_t cachedDurationUs = -1ll;
+    int64_t cachedDurationUs = -1LL;
     ssize_t cachedDataRemaining = -1;
 
     if (mCachedSource != NULL) {
@@ -1527,15 +1527,15 @@
 
         if (finalStatus == OK) {
             off64_t size;
-            int64_t bitrate = 0ll;
+            int64_t bitrate = 0LL;
             if (mDurationUs > 0 && mCachedSource->getSize(&size) == OK) {
                 // |bitrate| uses bits/second unit, while size is number of bytes.
-                bitrate = size * 8000000ll / mDurationUs;
+                bitrate = size * 8000000LL / mDurationUs;
             } else if (mBitrate > 0) {
                 bitrate = mBitrate;
             }
             if (bitrate > 0) {
-                cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;
+                cachedDurationUs = cachedDataRemaining * 8000000LL / bitrate;
             }
         }
     }
@@ -1560,8 +1560,8 @@
         return;
     }
 
-    if (cachedDurationUs >= 0ll) {
-        if (mDurationUs > 0ll) {
+    if (cachedDurationUs >= 0LL) {
+        if (mDurationUs > 0LL) {
             int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
             int percentage = 100.0 * cachedPosUs / mDurationUs;
             if (percentage > 100) {
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 11f1bfd..77e7885 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -271,10 +271,10 @@
 
         if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
             notify->post();
-            msg->post(delayUs > 0ll ? delayUs : 0ll);
+            msg->post(delayUs > 0LL ? delayUs : 0LL);
             return;
         } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
-            if (delayUs < -1000000ll) { // 1 second
+            if (delayUs < -1000000LL) { // 1 second
                 continue;
             }
             notify->post();
@@ -286,7 +286,7 @@
     }
 
     // try again in 1 second
-    msg->post(1000000ll);
+    msg->post(1000000LL);
 }
 
 void NuPlayer::HTTPLiveSource::onMessageReceived(const sp<AMessage> &msg) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 0784939..5cf6bbd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -659,20 +659,31 @@
 
         case kWhatGetSelectedTrack:
         {
+            int32_t type32;
+            CHECK(msg->findInt32("type", (int32_t*)&type32));
+            media_track_type type = (media_track_type)type32;
+
+            size_t inbandTracks = 0;
             status_t err = INVALID_OPERATION;
+            ssize_t selectedTrack = -1;
             if (mSource != NULL) {
                 err = OK;
-
-                int32_t type32;
-                CHECK(msg->findInt32("type", (int32_t*)&type32));
-                media_track_type type = (media_track_type)type32;
-                ssize_t selectedTrack = mSource->getSelectedTrack(type);
-
-                Parcel* reply;
-                CHECK(msg->findPointer("reply", (void**)&reply));
-                reply->writeInt32(selectedTrack);
+                inbandTracks = mSource->getTrackCount();
+                selectedTrack = mSource->getSelectedTrack(type);
             }
 
+            if (selectedTrack == -1 && mCCDecoder != NULL) {
+                err = OK;
+                selectedTrack = mCCDecoder->getSelectedTrack(type);
+                if (selectedTrack != -1) {
+                    selectedTrack += inbandTracks;
+                }
+            }
+
+            Parcel* reply;
+            CHECK(msg->findPointer("reply", (void**)&reply));
+            reply->writeInt32(selectedTrack);
+
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
 
@@ -750,7 +761,7 @@
                 }
             }
 
-            msg->post(1000000ll);  // poll again in a second.
+            msg->post(1000000LL);  // poll again in a second.
             break;
         }
 
@@ -1038,7 +1049,7 @@
             }
 
             if (rescan) {
-                msg->post(100000ll);
+                msg->post(100000LL);
                 mScanSourcesPending = true;
             }
             break;
@@ -2659,7 +2670,7 @@
             int posMs;
             int64_t timeUs, posUs;
             driver->getCurrentPosition(&posMs);
-            posUs = (int64_t) posMs * 1000ll;
+            posUs = (int64_t) posMs * 1000LL;
             CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
             if (posUs < timeUs) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index fb12360..0156ad2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -200,6 +200,18 @@
     return OK;
 }
 
+ssize_t NuPlayer::CCDecoder::getSelectedTrack(media_track_type type) const {
+    if (mSelectedTrack != -1) {
+        CCTrack track = mTracks[mSelectedTrack];
+        if (track.mTrackType == kTrackTypeCEA608 || track.mTrackType == kTrackTypeCEA708) {
+            return (type == MEDIA_TRACK_TYPE_SUBTITLE ? mSelectedTrack : -1);
+        }
+        return (type == MEDIA_TRACK_TYPE_UNKNOWN ? mSelectedTrack : -1);
+    }
+
+    return -1;
+}
+
 bool NuPlayer::CCDecoder::isSelected() const {
     return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)getTrackCount();
 }
@@ -542,7 +554,7 @@
 
         ccBuf->meta()->setInt32("track-index", mSelectedTrack);
         ccBuf->meta()->setInt64("timeUs", timeUs);
-        ccBuf->meta()->setInt64("durationUs", 0ll);
+        ccBuf->meta()->setInt64("durationUs", 0LL);
 
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatClosedCaptionData);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
index f310f37..37b8230 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
@@ -38,6 +38,7 @@
     size_t getTrackCount() const;
     sp<AMessage> getTrackInfo(size_t index) const;
     status_t selectTrack(size_t index, bool select);
+    ssize_t getSelectedTrack(media_track_type type) const;
     bool isSelected() const;
     void decode(const sp<ABuffer> &accessUnit);
     void display(int64_t timeUs);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index a2ec699..df1ffde 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -71,10 +71,10 @@
       mCCDecoder(ccDecoder),
       mPid(pid),
       mUid(uid),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
-      mNumFramesTotal(0ll),
-      mNumInputFramesDropped(0ll),
-      mNumOutputFramesDropped(0ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
+      mNumFramesTotal(0LL),
+      mNumInputFramesDropped(0LL),
+      mNumOutputFramesDropped(0LL),
       mVideoWidth(0),
       mVideoHeight(0),
       mIsAudio(true),
@@ -409,10 +409,10 @@
         // TODO: For now, layer fps is calculated for some specific architectures.
         // But it really should be extracted from the stream.
         mVideoTemporalLayerAggregateFps[0] =
-            mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+            mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - 1));
         for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
             mVideoTemporalLayerAggregateFps[i] =
-                mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+                mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - i))
                 + mVideoTemporalLayerAggregateFps[i - 1];
         }
     }
@@ -934,7 +934,7 @@
 
             int32_t layerId = 0;
             bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
-            if (mRenderer->getVideoLateByUs() > 100000ll
+            if (mRenderer->getVideoLateByUs() > 100000LL
                     && mIsVideoAVC
                     && !IsAVCReferenceFrame(accessUnit)) {
                 dropAccessUnit = true;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index d0de7b0..3e96d27 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -120,7 +120,7 @@
         mRequestInputBuffersPending = true;
 
         sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
-        msg->post(10 * 1000ll);
+        msg->post(10 * 1000LL);
     }
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 6b05b53..0997e7d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -47,7 +47,7 @@
     : DecoderBase(notify),
       mSource(source),
       mRenderer(renderer),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
       mReachedEOS(true),
       mPendingAudioErr(OK),
       mPendingBuffersToDrain(0),
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 44f223d..ba3ebaa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -474,7 +474,7 @@
     ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
     Mutex::Autolock autoLock(mLock);
 
-    int64_t seekTimeUs = msec * 1000ll;
+    int64_t seekTimeUs = msec * 1000LL;
 
     switch (mState) {
         case STATE_PREPARED:
@@ -531,7 +531,7 @@
         return UNKNOWN_ERROR;
     }
 
-    *msec = (mDurationUs + 500ll) / 1000;
+    *msec = (mDurationUs + 500LL) / 1000;
 
     return OK;
 }
@@ -744,7 +744,7 @@
             int msec = 0;
             // getCurrentPosition should always return OK
             getCurrentPosition(&msec);
-            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000ll);
+            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000LL);
         }
 
         case INVOKE_ID_UNSELECT_TRACK:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index b258332..c8f6738 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -70,10 +70,10 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 10000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000LL;
 
 // Maximum allowed delay from AudioSink, 1.5 seconds.
-static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
 
 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
 
@@ -125,7 +125,7 @@
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorNumFramesWritten(-1),
-      mVideoLateByUs(0ll),
+      mVideoLateByUs(0LL),
       mNextVideoTimeMediaUs(-1),
       mHasAudio(false),
       mHasVideo(false),
@@ -580,7 +580,7 @@
                 // play back.
                 int64_t delayUs =
                     mAudioSink->msecsPerFrame()
-                        * numFramesPendingPlayout * 1000ll;
+                        * numFramesPendingPlayout * 1000LL;
                 if (mPlaybackRate > 1.0f) {
                     delayUs /= mPlaybackRate;
                 }
@@ -1172,7 +1172,7 @@
         int64_t nowUs = ALooper::GetNowUs();
         int64_t mediaUs;
         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
-            return 0ll;
+            return 0LL;
         } else {
             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
         }
@@ -1387,7 +1387,7 @@
         tooLate = false;
     }
 
-    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
+    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
     entry->mNotifyConsumed->setInt32("render", !tooLate);
     entry->mNotifyConsumed->post();
     mVideoQueue.erase(mVideoQueue.begin());
@@ -1519,7 +1519,7 @@
 
     ALOGV("queueDiff = %.2f secs", diff / 1E6);
 
-    if (diff > 100000ll) {
+    if (diff > 100000LL) {
         // Audio data starts More than 0.1 secs before video.
         // Drop some audio.
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 851217b..bf14ec2 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -30,7 +30,7 @@
 
 namespace android {
 
-const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+const int64_t kNearEOSTimeoutUs = 2000000LL; // 2 secs
 
 // Default Buffer Underflow/Prepare/StartServer/Overflow Marks
 static const int kUnderflowMarkMs   =  1000;  // 1 second
@@ -169,7 +169,7 @@
     // We're going to buffer at least 2 secs worth data on all tracks before
     // starting playback (both at startup and after a seek).
 
-    static const int64_t kMinDurationUs = 2000000ll;
+    static const int64_t kMinDurationUs = 2000000LL;
 
     int64_t mediaDurationUs = 0;
     getDuration(&mediaDurationUs);
@@ -273,7 +273,7 @@
 }
 
 status_t NuPlayer::RTSPSource::getDuration(int64_t *durationUs) {
-    *durationUs = -1ll;
+    *durationUs = -1LL;
 
     int64_t audioDurationUs;
     if (mAudioTrack != NULL
@@ -322,7 +322,7 @@
 
 void NuPlayer::RTSPSource::schedulePollBuffering() {
     sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
-    msg->post(1000000ll); // 1 second intervals
+    msg->post(1000000LL); // 1 second intervals
 }
 
 void NuPlayer::RTSPSource::checkBuffering(
@@ -346,10 +346,10 @@
         int64_t maxRebufferingMarkUs;
         {
             Mutex::Autolock _l(mBufferingSettingsLock);
-            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000ll;
+            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000LL;
             // TODO: maxRebufferingMarkUs could be larger than
             // mBufferingSettings.mResumePlaybackMarkMs * 1000LL.
-            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000ll;
+            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000LL;
         }
         // isFinished when duration is 0 checks for EOS result only
         if (bufferedDurationUs > initialMarkUs
@@ -369,7 +369,7 @@
                 ++overflowCount;
             }
             int64_t startServerMarkUs =
-                    (kUnderflowMarkMs * 1000ll + maxRebufferingMarkUs) / 2;
+                    (kUnderflowMarkMs * 1000LL + maxRebufferingMarkUs) / 2;
             if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
@@ -640,7 +640,7 @@
                 int64_t nptUs =
                     ((double)rtpTime - (double)info->mRTPTime)
                         / info->mTimeScale
-                        * 1000000ll
+                        * 1000000LL
                         + info->mNormalPlaytimeUs;
 
                 accessUnit->meta()->setInt64("timeUs", nptUs);
@@ -748,7 +748,7 @@
         TrackInfo info;
         info.mTimeScale = timeScale;
         info.mRTPTime = 0;
-        info.mNormalPlaytimeUs = 0ll;
+        info.mNormalPlaytimeUs = 0LL;
         info.mNPTMappingValid = false;
 
         if ((isAudio && mAudioTrack == NULL)
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index b3da53f..afdcd37 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -186,7 +186,7 @@
     // We're going to buffer at least 2 secs worth data on all tracks before
     // starting playback (both at startup and after a seek).
 
-    static const int64_t kMinDurationUs = 2000000ll;
+    static const int64_t kMinDurationUs = 2000000LL;
 
     sp<AnotherPacketSource> audioTrack = getSource(true /*audio*/);
     sp<AnotherPacketSource> videoTrack = getSource(false /*audio*/);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3080db5..114f492 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2237,6 +2237,15 @@
         } else {
             err = setupEAC3Codec(encoder, numChannels, sampleRate);
         }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4)) {
+        int32_t numChannels;
+        int32_t sampleRate;
+        if (!msg->findInt32("channel-count", &numChannels)
+                || !msg->findInt32("sample-rate", &sampleRate)) {
+            err = INVALID_OPERATION;
+        } else {
+            err = setupAC4Codec(encoder, numChannels, sampleRate);
+        }
     }
 
     if (err != OK) {
@@ -2348,6 +2357,17 @@
     return err;
 }
 
+status_t ACodec::setAudioPresentation(int32_t presentationId, int32_t programId) {
+    OMX_AUDIO_CONFIG_ANDROID_AUDIOPRESENTATION config;
+    InitOMXParams(&config);
+    config.nPresentationId = (OMX_S32)presentationId;
+    config.nProgramId = (OMX_S32)programId;
+    status_t err = mOMXNode->setConfig(
+            (OMX_INDEXTYPE)OMX_IndexConfigAudioPresentation,
+            &config, sizeof(config));
+    return err;
+}
+
 status_t ACodec::setPriority(int32_t priority) {
     if (priority < 0) {
         return BAD_VALUE;
@@ -2893,6 +2913,38 @@
             (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3, &def, sizeof(def));
 }
 
+status_t ACodec::setupAC4Codec(
+        bool encoder, int32_t numChannels, int32_t sampleRate) {
+    status_t err = setupRawAudioFormat(
+            encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (encoder) {
+        ALOGW("AC4 encoding is not supported.");
+        return INVALID_OPERATION;
+    }
+
+    OMX_AUDIO_PARAM_ANDROID_AC4TYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc4, &def, sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    def.nChannels = numChannels;
+    def.nSampleRate = sampleRate;
+
+    return mOMXNode->setParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc4, &def, sizeof(def));
+}
+
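+
The AC4 path above is decoder-only: setupAC4Codec() sets up the raw PCM output port, rejects encoder configuration, then applies channel count and sample rate to OMX_AUDIO_PARAM_ANDROID_AC4TYPE on the input port. A hedged caller-side sketch (helper name is hypothetical; framework-internal headers assumed) of the format keys this branch consumes:

    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/foundation/MediaDefs.h>

    // Minimal decoder format carrying the three keys read by the AC4 branch.
    static android::sp<android::AMessage> makeAc4DecoderFormat() {
        using namespace android;
        sp<AMessage> format = new AMessage;
        format->setString("mime", MEDIA_MIMETYPE_AUDIO_AC4);  // "audio/ac4"
        format->setInt32("channel-count", 2);                 // illustrative values
        format->setInt32("sample-rate", 48000);
        return format;  // MediaCodec::configure() routes this into setupAC4Codec()
    }
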
 static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
         bool isAMRWB, int32_t bps) {
     if (isAMRWB) {
@@ -5246,6 +5298,25 @@
                     break;
                 }
 
+                case OMX_AUDIO_CodingAndroidAC4:
+                {
+                    OMX_AUDIO_PARAM_ANDROID_AC4TYPE params;
+                    InitOMXParams(&params);
+                    params.nPortIndex = portIndex;
+
+                    err = mOMXNode->getParameter(
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc4,
+                            &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
+
+                    notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC4);
+                    notify->setInt32("channel-count", params.nChannels);
+                    notify->setInt32("sample-rate", params.nSampleRate);
+                    break;
+                }
+
                 case OMX_AUDIO_CodingAndroidOPUS:
                 {
                     OMX_AUDIO_PARAM_ANDROID_OPUSTYPE params;
@@ -7392,6 +7463,18 @@
         }
     }
 
+    int32_t presentationId = -1;
+    if (params->findInt32("audio-presentation-presentation-id", &presentationId)) {
+        int32_t programId = -1;
+        params->findInt32("audio-presentation-program-id", &programId);
+        status_t err = setAudioPresentation(presentationId, programId);
+        if (err != OK) {
+            ALOGI("[%s] failed setAudioPresentation. Failure is fine since this key is optional",
+                    mComponentName.c_str());
+            err = OK;
+        }
+    }
+
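+
The two keys handled above are optional runtime parameters rather than configure-time format fields, and a failure to apply them is deliberately swallowed. A hedged sketch (helper name hypothetical; framework-internal headers assumed) of how a caller could pass them:

    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/AMessage.h>

    // Select an audio presentation on a running decoder; MediaCodec forwards
    // the message to ACodec::setParameters(), which calls setAudioPresentation().
    static void selectAudioPresentation(const android::sp<android::MediaCodec> &codec,
                                        int32_t presentationId, int32_t programId) {
        using namespace android;
        sp<AMessage> params = new AMessage;
        params->setInt32("audio-presentation-presentation-id", presentationId);
        params->setInt32("audio-presentation-program-id", programId);
        codec->setParameters(params);  // errors are tolerated, as in the hunk above
    }
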
     // Ignore errors as failure is expected for codecs that aren't video encoders.
     (void)configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
 
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index cc5b7da..9aea88a 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -18,9 +18,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: ["libmedia"],
@@ -40,9 +37,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: ["libmedia", "libmediandk"],
@@ -80,9 +74,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -129,6 +120,7 @@
         "MediaMuxer.cpp",
         "NuCachedSource2.cpp",
         "NuMediaExtractor.cpp",
+        "OggWriter.cpp",
         "OMXClient.cpp",
         "OmxInfoBuilder.cpp",
         "RemoteMediaExtractor.cpp",
@@ -146,6 +138,7 @@
 
     shared_libs: [
         "libaudioutils",
+        "libbase",
         "libbinder",
         "libcamera_client",
         "libcutils",
@@ -167,6 +160,7 @@
         "libstagefright_codecbase",
         "libstagefright_foundation",
         "libstagefright_omx_utils",
+        "libstagefright_opus_common",
         "libstagefright_xmlparser",
         "libRScpp",
         "libhidlallocatorutils",
@@ -187,6 +181,7 @@
         "libstagefright_webm",
         "libstagefright_timedtext",
         "libvpx",
+        "libogg",
         "libwebm",
         "libstagefright_esds",
         "libstagefright_id3",
@@ -206,12 +201,6 @@
         "include",
     ],
 
-    // This is needed to make sure libcodec2 exists in all devices.
-    // TODO: Remove this once the public CCodec is enabled.
-    required: [
-        "libcodec2",
-    ],
-
     cflags: [
         "-Wno-multichar",
         "-Werror",
@@ -234,9 +223,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -291,9 +277,6 @@
             "unsigned-integer-overflow",
             "signed-integer-overflow",
         ],
-        diag: {
-            cfi: true,
-        },
     },
 }
 
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index ac9eb0b..ea818ff 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -134,4 +134,57 @@
     return plugin->setMediaCas(plugin->data, casToken, size);
 }
 
+// --------------------------------------------------------------------------------
+MediaExtractorCUnwrapperV3::MediaExtractorCUnwrapperV3(CMediaExtractorV3 *plugin) {
+    this->plugin = plugin;
+}
+
+MediaExtractorCUnwrapperV3::~MediaExtractorCUnwrapperV3() {
+    plugin->free(plugin->data);
+    free(plugin);
+}
+
+size_t MediaExtractorCUnwrapperV3::countTracks() {
+    return plugin->countTracks(plugin->data);
+}
+
+MediaTrack *MediaExtractorCUnwrapperV3::getTrack(size_t index) {
+    return new MediaTrackCUnwrapperV3(plugin->getTrack(plugin->data, index));
+}
+
+status_t MediaExtractorCUnwrapperV3::getTrackMetaData(
+        MetaDataBase& meta, size_t index, uint32_t flags) {
+    sp<AMessage> msg = new AMessage();
+    AMediaFormat *format = AMediaFormat_fromMsg(&msg);
+    media_status_t ret = plugin->getTrackMetaData(plugin->data, format, index, flags);
+    sp<MetaData> newMeta = new MetaData();
+    convertMessageToMetaData(msg, newMeta);
+    delete format;
+    meta = *newMeta;
+    return reverse_translate_error(ret);
+}
+
+status_t MediaExtractorCUnwrapperV3::getMetaData(MetaDataBase& meta) {
+    sp<AMessage> msg = new AMessage();
+    AMediaFormat *format = AMediaFormat_fromMsg(&msg);
+    media_status_t ret = plugin->getMetaData(plugin->data, format);
+    sp<MetaData> newMeta = new MetaData();
+    convertMessageToMetaData(msg, newMeta);
+    delete format;
+    meta = *newMeta;
+    return reverse_translate_error(ret);
+}
+
+const char * MediaExtractorCUnwrapperV3::name() {
+    return plugin->name(plugin->data);
+}
+
+uint32_t MediaExtractorCUnwrapperV3::flags() const {
+    return plugin->flags(plugin->data);
+}
+
+status_t MediaExtractorCUnwrapperV3::setMediaCas(const uint8_t* casToken, size_t size) {
+    return plugin->setMediaCas(plugin->data, casToken, size);
+}
+
 }  // namespace android
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 318c1eb..81fc4ae 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "MediaExtractorFactory"
 #include <utils/Log.h>
 
+#include <android/dlext.h>
 #include <binder/IPCThreadState.h>
 #include <binder/PermissionCache.h>
 #include <binder/IServiceManager.h>
@@ -36,6 +37,23 @@
 #include <dirent.h>
 #include <dlfcn.h>
 
+// Copied from GraphicsEnv.cpp
+// TODO(b/37049319) Get this from a header once one exists
+extern "C" {
+  android_namespace_t* android_create_namespace(const char* name,
+                                                const char* ld_library_path,
+                                                const char* default_library_path,
+                                                uint64_t type,
+                                                const char* permitted_when_isolated_path,
+                                                android_namespace_t* parent);
+  bool android_link_namespaces(android_namespace_t* from,
+                               android_namespace_t* to,
+                               const char* shared_libs_sonames);
+  enum {
+     ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
+  };
+}
+
 namespace android {
 
 // static
@@ -100,6 +118,12 @@
             freeMeta(meta);
         }
         ex = ret != nullptr ? new MediaExtractorCUnwrapperV2(ret) : nullptr;
+    } else if (creatorVersion == 3) {
+        CMediaExtractorV3 *ret = ((CreatorFuncV3)creator)(source->wrap(), meta);
+        if (meta != nullptr && freeMeta != nullptr) {
+            freeMeta(meta);
+        }
+        ex = ret != nullptr ? new MediaExtractorCUnwrapperV3(ret) : nullptr;
     }
 
     ALOGV("Created an extractor '%s' with confidence %.2f",
@@ -139,6 +163,13 @@
 std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
 bool MediaExtractorFactory::gPluginsRegistered = false;
 bool MediaExtractorFactory::gIgnoreVersion = false;
+std::string MediaExtractorFactory::gLinkedLibraries;
+
+// static
+void MediaExtractorFactory::SetLinkedLibraries(const std::string& linkedLibraries) {
+    Mutex::Autolock autoLock(gPluginMutex);
+    gLinkedLibraries = linkedLibraries;
+}
 
 // static
 void *MediaExtractorFactory::sniff(
@@ -170,6 +201,9 @@
         } else if ((*it)->def.def_version == 2) {
             curCreator = (void*) (*it)->def.sniff.v2(
                     source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
+        } else if ((*it)->def.def_version == 3) {
+            curCreator = (void*) (*it)->def.sniff.v3(
+                    source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
         }
 
         if (curCreator) {
@@ -199,7 +233,7 @@
         std::list<sp<ExtractorPlugin>> &pluginList) {
     // sanity check struct version, uuid, name
     if (plugin->def.def_version == 0
-            || plugin->def.def_version > EXTRACTORDEF_VERSION_CURRENT) {
+            || plugin->def.def_version > EXTRACTORDEF_VERSION_CURRENT + 1) {
         ALOGE("don't understand extractor format %u, ignoring.", plugin->def.def_version);
         return;
     }
@@ -319,6 +353,62 @@
     }
 }
 
+// static
+void MediaExtractorFactory::RegisterExtractorsInApex(
+        const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
+    ALOGV("search for plugins at %s", libDirPath);
+    ALOGV("linked libs %s", gLinkedLibraries.c_str());
+
+    android_namespace_t *extractorNs = android_create_namespace("extractor",
+            nullptr,  // ld_library_path
+            libDirPath,
+            ANDROID_NAMESPACE_TYPE_ISOLATED,
+            nullptr,  // permitted_when_isolated_path
+            nullptr); // parent
+    if (!android_link_namespaces(extractorNs, nullptr, gLinkedLibraries.c_str())) {
+        ALOGE("Failed to link namespace; extractor plug-ins in the APEX will not be loaded.");
+        return;
+    }
+    const android_dlextinfo dlextinfo = {
+        .flags = ANDROID_DLEXT_USE_NAMESPACE,
+        .library_namespace = extractorNs,
+    };
+
+    DIR *libDir = opendir(libDirPath);
+    if (libDir) {
+        struct dirent* libEntry;
+        while ((libEntry = readdir(libDir))) {
+            if (libEntry->d_name[0] == '.') {
+                continue;
+            }
+            String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
+            if (!libPath.contains("extractor.so")) {
+                continue;
+            }
+            void *libHandle = android_dlopen_ext(
+                    libPath.string(),
+                    RTLD_NOW | RTLD_LOCAL, &dlextinfo);
+            if (libHandle) {
+                GetExtractorDef getDef =
+                    (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
+                if (getDef) {
+                    ALOGV("registering sniffer for %s", libPath.string());
+                    RegisterExtractor(
+                            new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
+                } else {
+                    ALOGW("%s does not contain sniffer", libPath.string());
+                    dlclose(libHandle);
+                }
+            } else {
+                ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
+            }
+        }
+        closedir(libDir);
+    } else {
+        ALOGE("couldn't opendir(%s)", libDirPath);
+    }
+}
+
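+
RegisterExtractorsInApex() above loads the plug-ins from the media APEX inside an isolated linker namespace, so an extractor can only resolve the libraries explicitly listed via SetLinkedLibraries(). A reduced sketch of that pattern (helper name hypothetical; it relies on the extern "C" declarations near the top of this file and on <android/dlext.h>):

    #include <android/dlext.h>
    #include <dlfcn.h>
    #include <string>

    // Open one plug-in inside an isolated namespace limited to `allowedLibs`.
    static void *openInIsolatedNamespace(const char *dir, const char *soName,
                                         const char *allowedLibs) {
        android_namespace_t *ns = android_create_namespace(
                "extractor", nullptr /* ld_library_path */, dir,
                ANDROID_NAMESPACE_TYPE_ISOLATED,
                nullptr /* permitted_when_isolated_path */, nullptr /* parent */);
        if (ns == nullptr || !android_link_namespaces(ns, nullptr, allowedLibs)) {
            return nullptr;  // better to skip the plug-in than to widen its view
        }
        const android_dlextinfo info = {
                .flags = ANDROID_DLEXT_USE_NAMESPACE,
                .library_namespace = ns,
        };
        std::string path = std::string(dir) + "/" + soName;
        return android_dlopen_ext(path.c_str(), RTLD_NOW | RTLD_LOCAL, &info);
    }
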
 static bool compareFunc(const sp<ExtractorPlugin>& first, const sp<ExtractorPlugin>& second) {
     return strcmp(first->def.extractor_name, second->def.extractor_name) < 0;
 }
@@ -337,6 +427,12 @@
 
     std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
 
+    RegisterExtractorsInApex("/apex/com.android.media/lib"
+#ifdef __LP64__
+            "64"
+#endif
+            , *newList);
+
     RegisterExtractorsInSystem("/system/lib"
 #ifdef __LP64__
             "64"
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 98f59b5..9ba2add 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -35,6 +35,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MPEG4Writer.h>
+#include <media/stagefright/OggWriter.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
@@ -52,6 +53,8 @@
         mWriter = new MPEG4Writer(fd);
     } else if (format == OUTPUT_FORMAT_WEBM) {
         mWriter = new WebmWriter(fd);
+    } else if (format == OUTPUT_FORMAT_OGG) {
+        mWriter = new OggWriter(fd);
     }
 
     if (mWriter != NULL) {
@@ -59,6 +62,8 @@
         if (format == OUTPUT_FORMAT_HEIF) {
             // Note that the key uses recorder file types.
             mFileMeta->setInt32(kKeyFileType, output_format::OUTPUT_FORMAT_HEIF);
+        } else if (format == OUTPUT_FORMAT_OGG) {
+            mFileMeta->setInt32(kKeyFileType, output_format::OUTPUT_FORMAT_OGG);
         }
         mState = INITIALIZED;
     }
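
With the writer registered here (and the matching OUTPUT_FORMAT_OGG enumerator in MediaMuxer.h), an Opus track can be muxed straight into an .ogg file. A hedged usage sketch (function name hypothetical, error handling elided):

    #include <media/stagefright/MediaMuxer.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    // Write a single Opus frame through the new OGG path; real code would
    // check the addTrack()/start()/writeSampleData() results.
    static void writeOneOpusFrame(int fd, const android::sp<android::AMessage> &opusFormat,
                                  const android::sp<android::ABuffer> &frame, int64_t timeUs) {
        using namespace android;
        sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_OGG);
        ssize_t trackIndex = muxer->addTrack(opusFormat);  // mime must be audio/opus
        muxer->start();
        muxer->writeSampleData(frame, trackIndex, timeUs, 0 /* flags */);
        muxer->stop();
    }
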
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 821605d..6c0f989 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -155,4 +155,80 @@
     return wrapper->supportsNonBlockingRead(wrapper->data);
 }
 
+/* -------------- unwrapper v3 --------------- */
+
+MediaTrackCUnwrapperV3::MediaTrackCUnwrapperV3(CMediaTrackV3 *cmediatrack3) {
+    wrapper = cmediatrack3;
+    bufferGroup = nullptr;
+}
+
+MediaTrackCUnwrapperV3::~MediaTrackCUnwrapperV3() {
+    wrapper->free(wrapper->data);
+    free(wrapper);
+}
+
+status_t MediaTrackCUnwrapperV3::start() {
+    if (bufferGroup == nullptr) {
+        bufferGroup = new MediaBufferGroup();
+    }
+    return reverse_translate_error(wrapper->start(wrapper->data, bufferGroup->wrap()));
+}
+
+status_t MediaTrackCUnwrapperV3::stop() {
+    return reverse_translate_error(wrapper->stop(wrapper->data));
+}
+
+status_t MediaTrackCUnwrapperV3::getFormat(MetaDataBase& format) {
+    sp<AMessage> msg = new AMessage();
+    AMediaFormat *tmpFormat =  AMediaFormat_fromMsg(&msg);
+    media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
+    sp<MetaData> newMeta = new MetaData();
+    convertMessageToMetaData(msg, newMeta);
+    delete tmpFormat;
+    format = *newMeta;
+    return reverse_translate_error(ret);
+}
+
+status_t MediaTrackCUnwrapperV3::read(MediaBufferBase **buffer, const ReadOptions *options) {
+
+    uint32_t opts = 0;
+
+    if (options && options->getNonBlocking()) {
+        opts |= CMediaTrackReadOptions::NONBLOCKING;
+    }
+
+    int64_t seekPosition = 0;
+    MediaTrack::ReadOptions::SeekMode seekMode;
+    if (options && options->getSeekTo(&seekPosition, &seekMode)) {
+        opts |= SEEK;
+        opts |= (uint32_t) seekMode;
+    }
+    CMediaBufferV3 *buf = nullptr;
+    media_status_t ret = wrapper->read(wrapper->data, &buf, opts, seekPosition);
+    if (ret == AMEDIA_OK && buf != nullptr) {
+        *buffer = (MediaBufferBase*)buf->handle;
+        MetaDataBase &meta = (*buffer)->meta_data();
+        AMediaFormat *format = buf->meta_data(buf->handle);
+        // only convert the keys we're actually expecting, as doing
+        // the full convertMessageToMetaData() for every buffer is
+        // too expensive
+        int64_t val64;
+        if (format->mFormat->findInt64("timeUs", &val64)) {
+            meta.setInt64(kKeyTime, val64);
+        }
+        int32_t val32;
+        if (format->mFormat->findInt32("is-sync-frame", &val32)) {
+            meta.setInt32(kKeyIsSyncFrame, val32);
+        }
+    } else {
+        *buffer = nullptr;
+    }
+
+    return reverse_translate_error(ret);
+}
+
+bool MediaTrackCUnwrapperV3::supportNonblockingRead() {
+    return wrapper->supportsNonBlockingRead(wrapper->data);
+}
+
 }  // namespace android
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index a3259fd..dbc287e 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -308,6 +308,8 @@
 
 void parseVorbisComment(
         AMediaFormat *fileMeta, const char *comment, size_t commentLength) {
+    // The haptic key is kept local; only the extractor uses it, to generate the channel mask.
+    const char* const haptic = "haptic";
     struct {
         const char *const mTag;
         const char *mKey;
@@ -328,6 +330,7 @@
         { "LYRICIST", AMEDIAFORMAT_KEY_LYRICIST },
         { "METADATA_BLOCK_PICTURE", AMEDIAFORMAT_KEY_ALBUMART },
         { "ANDROID_LOOP", AMEDIAFORMAT_KEY_LOOP },
+        { "ANDROID_HAPTIC", haptic },
     };
 
         for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
@@ -343,6 +346,15 @@
                     if (!strcasecmp(&comment[tagLen + 1], "true")) {
                         AMediaFormat_setInt32(fileMeta, AMEDIAFORMAT_KEY_LOOP, 1);
                     }
+                } else if (kMap[j].mKey == haptic) {
+                    char *end;
+                    errno = 0;
+                    const int hapticChannelCount = strtol(&comment[tagLen + 1], &end, 10);
+                    if (errno == 0) {
+                        AMediaFormat_setInt32(fileMeta, haptic, hapticChannelCount);
+                    } else {
+                        ALOGE("Error(%d) when parsing haptic channel count", errno);
+                    }
                 } else {
                     AMediaFormat_setString(fileMeta, kMap[j].mKey, &comment[tagLen + 1]);
                 }
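
The new tag arrives as a Vorbis comment of the form ANDROID_HAPTIC=<count>; the branch above parses the count with strtol() and an errno check. A self-contained sketch of the same parse (with an extra end-pointer check):

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>

    int main() {
        const char *value = "2";  // text after "ANDROID_HAPTIC="
        char *end = nullptr;
        errno = 0;
        long hapticChannelCount = strtol(value, &end, 10);
        if (errno == 0 && end != value) {
            printf("haptic channel count: %ld\n", hapticChannelCount);
        } else {
            printf("malformed haptic channel count\n");
        }
        return 0;
    }
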
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
new file mode 100644
index 0000000..ad55c56
--- /dev/null
+++ b/media/libstagefright/OggWriter.cpp
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OggWriter"
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <media/MediaSource.h>
+#include <media/mediarecorder.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OggWriter.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include "OpusHeader.h"
+
+extern "C" {
+#include <ogg/ogg.h>
+}
+
+// store the int32 value in little-endian order.
+static inline void writeint(char *buf, int base, int32_t val) {
+    buf[base + 3] = ((val) >> 24) & 0xff;
+    buf[base + 2] = ((val) >> 16) & 0xff;
+    buf[base + 1] = ((val) >> 8) & 0xff;
+    buf[base] = (val)&0xff;
+}
+
+// linkage between our header OggStreamState and the underlying ogg_stream_state
+// so that consumers of our interface do not require the ogg headers themselves.
+struct OggStreamState : public ogg_stream_state {};
+
+namespace android {
+
+OggWriter::OggWriter(int fd)
+      : mFd(dup(fd)),
+        mInitCheck(mFd < 0 ? NO_INIT : OK) {
+    // empty
+}
+
+OggWriter::~OggWriter() {
+    if (mStarted) {
+        reset();
+    }
+
+    if (mFd != -1) {
+        close(mFd);
+        mFd = -1;
+    }
+
+    free(mOs);
+}
+
+status_t OggWriter::initCheck() const {
+    return mInitCheck;
+}
+
+status_t OggWriter::addSource(const sp<MediaSource>& source) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mSource != NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    // Support is limited to single track of Opus audio.
+    const char* mime;
+    source->getFormat()->findCString(kKeyMIMEType, &mime);
+    const char* opus = MEDIA_MIMETYPE_AUDIO_OPUS;
+    if (strncasecmp(mime, opus, strlen(opus))) {
+        ALOGE("Track (%s) other than %s is not supported", mime, opus);
+        return ERROR_UNSUPPORTED;
+    }
+
+    mOs = (OggStreamState*) malloc(sizeof(ogg_stream_state));
+    if (ogg_stream_init((ogg_stream_state*)mOs, rand()) == -1) {
+        ALOGE("ogg stream init failed");
+        return UNKNOWN_ERROR;
+    }
+
+    // Write Ogg headers.
+    int32_t nChannels = 0;
+    if (!source->getFormat()->findInt32(kKeyChannelCount, &nChannels)) {
+        ALOGE("Missing format keys for audio track");
+        source->getFormat()->dumpToLog();
+        return BAD_VALUE;
+    }
+    source->getFormat()->dumpToLog();
+
+    int32_t sampleRate = 0;
+    if (!source->getFormat()->findInt32(kKeySampleRate, &sampleRate)) {
+        ALOGE("Missing format key for sample rate");
+        source->getFormat()->dumpToLog();
+        return UNKNOWN_ERROR;
+    }
+
+    mSampleRate = sampleRate;
+
+    OpusHeader header;
+    header.channels = nChannels;
+    header.num_streams = nChannels;
+    header.num_coupled = 0;
+    header.channel_mapping = ((nChannels > 8) ? 255 : (nChannels > 2));
+    header.gain_db = 0;
+    header.skip_samples = 0;
+
+    // The header is 21 bytes plus a channel-mapping table whose size depends on
+    // the channel count, so expect a packet in the low 30s of bytes.
+    // WriteOpusHeader() returns an error if anything is wrong.
+    unsigned char header_data[100];
+    ogg_packet op;
+    ogg_page og;
+
+    const int packet_size = WriteOpusHeader(header, mSampleRate, (uint8_t*)header_data,
+                                            sizeof(header_data));
+
+    if (packet_size < 0) {
+        ALOGE("opus header writing failed");
+        return UNKNOWN_ERROR;
+    }
+    op.packet = header_data;
+    op.bytes = packet_size;
+    op.b_o_s = 1;
+    op.e_o_s = 0;
+    op.granulepos = 0;
+    op.packetno = 0;
+    ogg_stream_packetin((ogg_stream_state*)mOs, &op);
+
+    int ret;
+    while ((ret = ogg_stream_flush((ogg_stream_state*)mOs, &og))) {
+        if (!ret) break;
+        write(mFd, og.header, og.header_len);
+        write(mFd, og.body, og.body_len);
+    }
+
+
+    const char* vendor_string = "libopus";
+    const int vendor_length = strlen(vendor_string);
+    int user_comment_list_length = 0;
+
+    const int comments_length = 8 + 4 + vendor_length + 4 + user_comment_list_length;
+    char* comments = (char*)malloc(comments_length);
+    if (comments == NULL) {
+        ALOGE("failed to allocate ogg comment buffer");
+        return UNKNOWN_ERROR;
+    }
+    memcpy(comments, "OpusTags", 8);
+    writeint(comments, 8, vendor_length);
+    memcpy(comments + 12, vendor_string, vendor_length);
+    writeint(comments, 12 + vendor_length, user_comment_list_length);
+
+    op.packet = (unsigned char*)comments;
+    op.bytes = comments_length;
+    op.b_o_s = 0;
+    op.e_o_s = 0;
+    op.granulepos = 0;
+    op.packetno = 1;
+    ogg_stream_packetin((ogg_stream_state*)mOs, &op);
+
+    while ((ret = ogg_stream_flush((ogg_stream_state*)mOs, &og))) {
+        if (!ret) break;
+        write(mFd, og.header, og.header_len);
+        write(mFd, og.body, og.body_len);
+    }
+
+    mSource = source;
+    free(comments);
+    return OK;
+}
+
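+
The second packet queued by addSource() is the OpusTags block: the 8-byte magic, a little-endian vendor-string length, the vendor string, then a little-endian user-comment count (zero here). A self-contained sketch of the layout produced by writeint() and the memcpy calls above:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    static void putU32LE(std::vector<uint8_t> &out, uint32_t v) {
        out.push_back(v & 0xff);
        out.push_back((v >> 8) & 0xff);
        out.push_back((v >> 16) & 0xff);
        out.push_back((v >> 24) & 0xff);
    }

    // "OpusTags" | u32le vendor_length | vendor | u32le user_comment_count
    std::vector<uint8_t> makeOpusTags(const std::string &vendor) {
        std::vector<uint8_t> packet;
        const char magic[] = "OpusTags";
        packet.insert(packet.end(), magic, magic + 8);
        putU32LE(packet, static_cast<uint32_t>(vendor.size()));
        packet.insert(packet.end(), vendor.begin(), vendor.end());
        putU32LE(packet, 0);  // no user comments are written
        return packet;        // 8 + 4 + vendor.size() + 4 bytes, as in addSource()
    }
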
+status_t OggWriter::start(MetaData* /* params */) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mSource == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    if (mStarted && mPaused) {
+        mPaused = false;
+        mResumed = true;
+        return OK;
+    } else if (mStarted) {
+        // Already started, does nothing
+        return OK;
+    }
+
+    status_t err = mSource->start();
+
+    if (err != OK) {
+        return err;
+    }
+
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+    mReachedEOS = false;
+    mDone = false;
+
+    pthread_create(&mThread, &attr, ThreadWrapper, this);
+    pthread_attr_destroy(&attr);
+
+    mStarted = true;
+
+    return OK;
+}
+
+status_t OggWriter::pause() {
+    if (!mStarted) {
+        return OK;
+    }
+    mPaused = true;
+    return OK;
+}
+
+status_t OggWriter::reset() {
+    if (!mStarted) {
+        return OK;
+    }
+
+    mDone = true;
+
+    void* dummy;
+    pthread_join(mThread, &dummy);
+
+    status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+    {
+        status_t status = mSource->stop();
+        if (err == OK && (status != OK && status != ERROR_END_OF_STREAM)) {
+            err = status;
+        }
+    }
+
+    mStarted = false;
+    return err;
+}
+
+bool OggWriter::exceedsFileSizeLimit() {
+    if (mMaxFileSizeLimitBytes == 0) {
+        return false;
+    }
+    return mEstimatedSizeBytes > mMaxFileSizeLimitBytes;
+}
+
+bool OggWriter::exceedsFileDurationLimit() {
+    if (mMaxFileDurationLimitUs == 0) {
+        return false;
+    }
+    return mEstimatedDurationUs > mMaxFileDurationLimitUs;
+}
+
+// static
+void* OggWriter::ThreadWrapper(void* me) {
+    return (void*)(uintptr_t) static_cast<OggWriter*>(me)->threadFunc();
+}
+
+status_t OggWriter::threadFunc() {
+    mEstimatedDurationUs = 0;
+    mEstimatedSizeBytes = 0;
+    bool stoppedPrematurely = true;
+    int64_t previousPausedDurationUs = 0;
+    int64_t maxTimestampUs = 0;
+    status_t err = OK;
+
+    prctl(PR_SET_NAME, (unsigned long)"OggWriter", 0, 0, 0);
+
+    while (!mDone) {
+        MediaBufferBase* buffer = nullptr;
+        err = mSource->read(&buffer);
+
+        if (err != OK) {
+            ALOGW("failed to read next buffer");
+            break;
+        }
+
+        if (mPaused) {
+            buffer->release();
+            buffer = nullptr;
+            continue;
+        }
+        mEstimatedSizeBytes += buffer->range_length();
+        if (exceedsFileSizeLimit()) {
+            buffer->release();
+            buffer = nullptr;
+            notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+            ALOGW("estimated size(%" PRId64 ") exceeds limit (%" PRId64 ")",
+                  mEstimatedSizeBytes, mMaxFileSizeLimitBytes);
+            break;
+        }
+        int64_t timestampUs;
+        CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
+        if (timestampUs > mEstimatedDurationUs) {
+            mEstimatedDurationUs = timestampUs;
+        }
+        if (mResumed) {
+            previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+            mResumed = false;
+        }
+
+        timestampUs -= previousPausedDurationUs;
+
+        ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64, timestampUs,
+              previousPausedDurationUs);
+        if (timestampUs > maxTimestampUs) {
+            maxTimestampUs = timestampUs;
+        }
+
+        if (exceedsFileDurationLimit()) {
+            buffer->release();
+            buffer = nullptr;
+            notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+            ALOGW("estimated duration(%" PRId64 " us) exceeds limit(%" PRId64 " us)",
+                  mEstimatedDurationUs, mMaxFileDurationLimitUs);
+            break;
+        }
+
+        ogg_packet op;
+        ogg_page og;
+        op.packet = (uint8_t*)buffer->data() + buffer->range_offset();
+        op.bytes = (long)buffer->range_length();
+        op.b_o_s = 0;
+        op.e_o_s = mReachedEOS ? 1 : 0;
+        // granulepos is the total number of PCM audio samples @ 48 kHz, up to and
+        // including the current packet.
+        ogg_int64_t granulepos = (48000 * mEstimatedDurationUs) / 1000000;
+        op.granulepos = granulepos;
+
+        // Headers are at packets 0 and 1.
+        op.packetno = 2 + (ogg_int32_t)mCurrentPacketId++;
+        ogg_stream_packetin((ogg_stream_state*)mOs, &op);
+        size_t n = 0;
+
+        while (ogg_stream_flush((ogg_stream_state*)mOs, &og) > 0) {
+            write(mFd, og.header, og.header_len);
+            write(mFd, og.body, og.body_len);
+            n = n + og.header_len + og.body_len;
+        }
+
+        if (n < buffer->range_length()) {
+            buffer->release();
+            buffer = nullptr;
+            err = ERROR_IO;
+            break;
+        }
+
+        if (err != OK) {
+            break;
+        }
+
+        stoppedPrematurely = false;
+
+        buffer->release();
+        buffer = nullptr;
+    }
+
+    // end of stream is expected and not an error
+    if (err == ERROR_END_OF_STREAM) {
+        err = OK;
+    }
+
+    if (err == OK && stoppedPrematurely) {
+        err = ERROR_MALFORMED;
+    }
+
+    close(mFd);
+    mFd = -1;
+    mReachedEOS = true;
+
+    return err;
+}
+
+bool OggWriter::reachedEOS() {
+    return mReachedEOS;
+}
+
+}  // namespace android
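
One detail worth calling out from threadFunc(): Opus granule positions are always counted in 48 kHz samples regardless of the coded sample rate, which is why the writer converts its running duration in microseconds with (48000 * durationUs) / 1000000. A self-contained check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    static int64_t granuleposFromDurationUs(int64_t durationUs) {
        return (48000 * durationUs) / 1000000;  // 48 kHz sample count
    }

    int main() {
        assert(granuleposFromDurationUs(20000) == 960);         // one 20 ms frame
        assert(granuleposFromDurationUs(60000000) == 2880000);  // one minute
        return 0;
    }
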
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index 610b961..17187a8 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -41,7 +41,7 @@
         ".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
         ".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
         ".avi", ".mpeg", ".mpg", ".awb", ".mpga", ".mov",
-        ".m4v", ".oga"
+        ".m4v", ".oga", ".m4r"
     };
     static const size_t kNumValidExtensions =
         sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
index dd5903a..b90649c 100644
--- a/media/libstagefright/StagefrightPluginLoader.cpp
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "StagefrightPluginLoader"
 #include <utils/Log.h>
 
+#include <android-base/properties.h>
 #include <dlfcn.h>
 
 #include "StagefrightPluginLoader.h"
@@ -27,9 +28,17 @@
 /* static */ Mutex StagefrightPluginLoader::sMutex;
 /* static */ std::unique_ptr<StagefrightPluginLoader> StagefrightPluginLoader::sInstance;
 
-StagefrightPluginLoader::StagefrightPluginLoader(const char *libPath)
-    : mCreateCodec(nullptr),
-      mCreateBuilder(nullptr) {
+namespace /* unnamed */ {
+
+constexpr const char kCCodecPluginPath[] = "libsfplugin_ccodec.so";
+
+}  // unnamed namespace
+
+StagefrightPluginLoader::StagefrightPluginLoader(const char *libPath) {
+    if (android::base::GetIntProperty("debug.media.codec2", 0) == 0) {
+        ALOGD("CCodec is disabled.");
+        return;
+    }
     mLibHandle = dlopen(libPath, RTLD_NOW | RTLD_NODELETE);
     if (mLibHandle == nullptr) {
         ALOGD("Failed to load library: %s (%s)", libPath, dlerror());
@@ -87,7 +96,7 @@
     Mutex::Autolock _l(sMutex);
     if (!sInstance) {
         ALOGV("Loading library");
-        sInstance.reset(new StagefrightPluginLoader("libstagefright_ccodec.so"));
+        sInstance.reset(new StagefrightPluginLoader(kCCodecPluginPath));
     }
     return sInstance;
 }
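
The loader now refuses to dlopen the CCodec plug-in unless the debug.media.codec2 system property is non-zero (for local testing it can be flipped with `adb shell setprop debug.media.codec2 1` before restarting the media process). A minimal sketch of the same opt-in check:

    #include <android-base/properties.h>

    // True only when debug.media.codec2 is set to a non-zero value.
    static bool isCCodecEnabled() {
        return android::base::GetIntProperty("debug.media.codec2", 0) != 0;
    }
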
diff --git a/media/libstagefright/StagefrightPluginLoader.h b/media/libstagefright/StagefrightPluginLoader.h
index 999d30c..78effbf 100644
--- a/media/libstagefright/StagefrightPluginLoader.h
+++ b/media/libstagefright/StagefrightPluginLoader.h
@@ -40,10 +40,10 @@
     static Mutex sMutex;
     static std::unique_ptr<StagefrightPluginLoader> sInstance;
 
-    void *mLibHandle;
-    CodecBase::CreateCodecFunc mCreateCodec;
-    MediaCodecListBuilderBase::CreateBuilderFunc mCreateBuilder;
-    CodecBase::CreateInputSurfaceFunc mCreateInputSurface;
+    void *mLibHandle{nullptr};
+    CodecBase::CreateCodecFunc mCreateCodec{nullptr};
+    MediaCodecListBuilderBase::CreateBuilderFunc mCreateBuilder{nullptr};
+    CodecBase::CreateInputSurfaceFunc mCreateInputSurface{nullptr};
 };
 
 }  // namespace android
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 53c32b2..670b607 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -588,10 +588,17 @@
     }
 };
 
+static std::vector<std::pair<const char *, uint32_t>> floatMappings {
+    {
+        { "capture-rate", kKeyCaptureFramerate },
+    }
+};
+
 static std::vector<std::pair<const char *, uint32_t>> int64Mappings {
     {
         { "exif-offset", kKeyExifOffset },
         { "exif-size", kKeyExifSize },
+        { "thumbnail-time", kKeyThumbnailTime },
     }
 };
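
convertMessageToMetaData() and convertMetaDataToMessage() are table-driven, so supporting a new float key such as "capture-rate" is a single entry consumed by both directions. A self-contained sketch of that pattern, with std::map stand-ins for AMessage/MetaData and a hypothetical numeric key value:

    #include <cstdint>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    static const std::vector<std::pair<const char *, uint32_t>> kFloatMappings{
        { "capture-rate", 1u },  // stand-in value, not the real kKeyCaptureFramerate
    };

    // One direction of the conversion; the reverse loop walks the same table.
    void msgToMeta(const std::map<std::string, float> &msg,
                   std::map<uint32_t, float> &meta) {
        for (const auto &elem : kFloatMappings) {
            auto it = msg.find(elem.first);
            if (it != msg.end()) {
                meta[elem.second] = it->second;
            }
        }
    }
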
 
@@ -632,6 +639,13 @@
         }
     }
 
+    for (auto elem : floatMappings) {
+        float value;
+        if (msg->findFloat(elem.first, &value)) {
+            meta->setFloat(elem.second, value);
+        }
+    }
+
     for (auto elem : int64Mappings) {
         int64_t value;
         if (msg->findInt64(elem.first, &value)) {
@@ -663,6 +677,13 @@
         }
     }
 
+    for (auto elem : floatMappings) {
+        float value;
+        if (meta->findFloat(elem.second, &value)) {
+            format->setFloat(elem.first, value);
+        }
+    }
+
     for (auto elem : int64Mappings) {
         int64_t value;
         if (meta->findInt64(elem.second, &value)) {
@@ -1801,6 +1822,7 @@
     { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
     { MEDIA_MIMETYPE_AUDIO_AC3,         AUDIO_FORMAT_AC3},
     { MEDIA_MIMETYPE_AUDIO_EAC3,        AUDIO_FORMAT_E_AC3},
+    { MEDIA_MIMETYPE_AUDIO_EAC3_JOC,    AUDIO_FORMAT_E_AC3_JOC},
     { MEDIA_MIMETYPE_AUDIO_AC4,         AUDIO_FORMAT_AC4},
     { MEDIA_MIMETYPE_AUDIO_FLAC,        AUDIO_FORMAT_FLAC},
     { MEDIA_MIMETYPE_AUDIO_ALAC,        AUDIO_FORMAT_ALAC },
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index c647f5f..81777f1 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -61,8 +61,5 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libstagefright/codecs/aacdec/Android.bp b/media/libstagefright/codecs/aacdec/Android.bp
index 7352854..25628a2 100644
--- a/media/libstagefright/codecs/aacdec/Android.bp
+++ b/media/libstagefright/codecs/aacdec/Android.bp
@@ -25,9 +25,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static_libs: ["libFraunhoferAAC"],
diff --git a/media/libstagefright/codecs/aacenc/Android.bp b/media/libstagefright/codecs/aacenc/Android.bp
index 9342351..ec1151b 100644
--- a/media/libstagefright/codecs/aacenc/Android.bp
+++ b/media/libstagefright/codecs/aacenc/Android.bp
@@ -22,9 +22,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static_libs: ["libFraunhoferAAC"],
diff --git a/media/libstagefright/codecs/amrwbenc/Android.bp b/media/libstagefright/codecs/amrwbenc/Android.bp
index ebe08c6..b9d45c1 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/Android.bp
@@ -134,9 +134,6 @@
     cflags: ["-Werror"],
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
@@ -166,9 +163,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static_libs: ["libstagefright_amrwbenc"],
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp
index 81b3f69..95f9494 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp
@@ -23,8 +23,5 @@
 
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libstagefright/codecs/avcdec/Android.bp b/media/libstagefright/codecs/avcdec/Android.bp
index cf50a04..8a34845 100644
--- a/media/libstagefright/codecs/avcdec/Android.bp
+++ b/media/libstagefright/codecs/avcdec/Android.bp
@@ -34,9 +34,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/libstagefright/codecs/avcenc/Android.bp b/media/libstagefright/codecs/avcenc/Android.bp
index cefe77c..6371828 100644
--- a/media/libstagefright/codecs/avcenc/Android.bp
+++ b/media/libstagefright/codecs/avcenc/Android.bp
@@ -28,9 +28,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     cflags: [
diff --git a/media/libstagefright/codecs/flac/dec/Android.bp b/media/libstagefright/codecs/flac/dec/Android.bp
index 9af086b..1674cb2 100644
--- a/media/libstagefright/codecs/flac/dec/Android.bp
+++ b/media/libstagefright/codecs/flac/dec/Android.bp
@@ -26,9 +26,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index 73f0dac..9b696da 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -18,9 +18,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/codecs/g711/dec/Android.bp b/media/libstagefright/codecs/g711/dec/Android.bp
index 3d97d8c..7097688 100644
--- a/media/libstagefright/codecs/g711/dec/Android.bp
+++ b/media/libstagefright/codecs/g711/dec/Android.bp
@@ -29,9 +29,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/gsm/dec/Android.bp b/media/libstagefright/codecs/gsm/dec/Android.bp
index 1c3208b..a973f70 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.bp
+++ b/media/libstagefright/codecs/gsm/dec/Android.bp
@@ -23,9 +23,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/codecs/hevcdec/Android.bp b/media/libstagefright/codecs/hevcdec/Android.bp
index 45920e6..60fc446 100644
--- a/media/libstagefright/codecs/hevcdec/Android.bp
+++ b/media/libstagefright/codecs/hevcdec/Android.bp
@@ -28,9 +28,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.bp b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
index ca70cc2..41141b1 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
@@ -60,9 +60,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -106,9 +103,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
index 6be4036..d4f7d50 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
@@ -44,9 +44,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -92,9 +89,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     compile_multilib: "32",
 }
@@ -122,9 +116,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static_libs: ["libstagefright_m4vh263enc"],
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
index 00f2dd3..71e1170 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -17,7 +17,6 @@
 #ifndef SOFT_MPEG4_ENCODER_H_
 #define SOFT_MPEG4_ENCODER_H_
 
-#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/omx/SoftVideoEncoderOMXComponent.h>
 #include "mp4enc_api.h"
diff --git a/media/libstagefright/codecs/mp3dec/Android.bp b/media/libstagefright/codecs/mp3dec/Android.bp
index 9fa9a4c..2154f84 100644
--- a/media/libstagefright/codecs/mp3dec/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/Android.bp
@@ -59,9 +59,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     include_dirs: ["frameworks/av/media/libstagefright/include"],
@@ -106,9 +103,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
@@ -144,9 +138,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static_libs: [
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp
index e55c2e7..d644207 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp
@@ -127,13 +127,13 @@
         {
             tmp = getbits_crc(inputStream, 14, crc, info->error_protection);
             si->main_data_begin = (tmp << 18) >> 23;    /* 9 */
-            si->private_bits    = (tmp << 23) >> 27;    /* 5 */
+            si->private_bits    = (tmp << 27) >> 27;    /* 5 */
         }
         else
         {
             tmp = getbits_crc(inputStream, 12, crc, info->error_protection);
             si->main_data_begin = (tmp << 20) >> 23;    /* 9 */
-            si->private_bits    = (tmp << 23) >> 29;    /* 3 */
+            si->private_bits    = (tmp << 29) >> 29;    /* 3 */
 
         }
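
The private_bits fix above matters because the field occupies the lowest bits of the side-info word: (tmp << 27) >> 27 keeps exactly the bottom five bits, whereas the old (tmp << 23) >> 27 extracted bits 8..4 and overlapped main_data_begin (likewise >> 29 for the 3-bit case). A self-contained check for the 14-bit layout (9-bit main_data_begin plus 5 private bits):

    #include <cassert>
    #include <cstdint>

    int main() {
        // main_data_begin = 0x1AB in bits 13..5, private_bits = 0x15 in bits 4..0.
        uint32_t tmp = (0x1ABu << 5) | 0x15u;
        uint32_t main_data_begin = (tmp << 18) >> 23;  // bits 13..5
        uint32_t old_private     = (tmp << 23) >> 27;  // bug: bits 8..4
        uint32_t new_private     = (tmp << 27) >> 27;  // fix: bits 4..0
        assert(main_data_begin == 0x1AB);
        assert(new_private == 0x15);
        assert(old_private != 0x15);
        return 0;
    }
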
 
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.bp b/media/libstagefright/codecs/mpeg2dec/Android.bp
index cd83db0..c655544 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.bp
+++ b/media/libstagefright/codecs/mpeg2dec/Android.bp
@@ -34,9 +34,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/on2/dec/Android.bp b/media/libstagefright/codecs/on2/dec/Android.bp
index 8a9399a..174f183 100644
--- a/media/libstagefright/codecs/on2/dec/Android.bp
+++ b/media/libstagefright/codecs/on2/dec/Android.bp
@@ -31,9 +31,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index 3d9feeb..891a771 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -26,9 +26,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static_libs: ["libvpx"],
diff --git a/media/libstagefright/codecs/opus/dec/Android.bp b/media/libstagefright/codecs/opus/dec/Android.bp
index 43318f2..afe459d 100644
--- a/media/libstagefright/codecs/opus/dec/Android.bp
+++ b/media/libstagefright/codecs/opus/dec/Android.bp
@@ -30,9 +30,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     compile_multilib: "32",
 }
diff --git a/media/libstagefright/codecs/raw/Android.bp b/media/libstagefright/codecs/raw/Android.bp
index c8d7d00..f822445 100644
--- a/media/libstagefright/codecs/raw/Android.bp
+++ b/media/libstagefright/codecs/raw/Android.bp
@@ -22,9 +22,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/colorconversion/Android.bp b/media/libstagefright/colorconversion/Android.bp
index 16e9ded..ba57497 100644
--- a/media/libstagefright/colorconversion/Android.bp
+++ b/media/libstagefright/colorconversion/Android.bp
@@ -24,8 +24,5 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index e944224..7a67e55 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -31,8 +31,5 @@
 
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index 8d486cf..6bfab16 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -24,9 +24,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     static: {
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index a8adff5..fb51cc5 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -365,6 +365,8 @@
 // static
 AString AString::FromParcel(const Parcel &parcel) {
     size_t size = static_cast<size_t>(parcel.readInt32());
+    // The static analyzer reports a false positive here in C++17.
+    // https://bugs.llvm.org/show_bug.cgi?id=38176 . NOLINTNEXTLINE
     return AString(static_cast<const char *>(parcel.readInplace(size)), size);
 }
 
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index 861528e..dcf1ab8 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -79,8 +79,5 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index f93ae65..aba44bb 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
 const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
 const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC = "audio/eac3-joc";
 const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 523378e..8edddcc 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
 extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
 extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC;
 extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
diff --git a/media/libstagefright/http/Android.bp b/media/libstagefright/http/Android.bp
index 2e49fc4..8655caf 100644
--- a/media/libstagefright/http/Android.bp
+++ b/media/libstagefright/http/Android.bp
@@ -29,9 +29,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     product_variables: {
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 2907751..78d410a 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -25,9 +25,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index b42c053..7151d07 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -16,9 +16,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -47,9 +44,6 @@
 
     sanitize: {
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 73f93d1..80125d4 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -461,6 +461,8 @@
 
     status_t setupEAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
 
+    status_t setupAC4Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+
     status_t selectAudioPortFormat(
             OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
 
@@ -477,6 +479,7 @@
     status_t setPriority(int32_t priority);
     status_t setLatency(uint32_t latency);
     status_t getLatency(uint32_t *latency);
+    status_t setAudioPresentation(int32_t presentationId, int32_t programId);
     status_t setOperatingRate(float rateFloat, bool isVideo);
     status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
     status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index a462ae7..704bfdd 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -135,6 +135,8 @@
 constexpr int32_t VP9Profile3 = 0x08;
 constexpr int32_t VP9Profile2HDR = 0x1000;
 constexpr int32_t VP9Profile3HDR = 0x2000;
+constexpr int32_t VP9Profile2HDR10Plus = 0x4000;
+constexpr int32_t VP9Profile3HDR10Plus = 0x8000;
 
 constexpr int32_t VP9Level1  = 0x1;
 constexpr int32_t VP9Level11 = 0x2;
@@ -155,6 +157,7 @@
 constexpr int32_t HEVCProfileMain10      = 0x02;
 constexpr int32_t HEVCProfileMainStill   = 0x04;
 constexpr int32_t HEVCProfileMain10HDR10 = 0x1000;
+constexpr int32_t HEVCProfileMain10HDR10Plus = 0x2000;
 
 constexpr int32_t HEVCMainTierLevel1  = 0x1;
 constexpr int32_t HEVCHighTierLevel1  = 0x2;
@@ -343,6 +346,7 @@
 constexpr char KEY_GRID_COLUMNS[] = "grid-cols";
 constexpr char KEY_GRID_ROWS[] = "grid-rows";
 constexpr char KEY_HDR_STATIC_INFO[] = "hdr-static-info";
+constexpr char KEY_HDR10_PLUS_INFO[] = "hdr10-plus-info";
 constexpr char KEY_HEIGHT[] = "height";
 constexpr char KEY_I_FRAME_INTERVAL[] = "i-frame-interval";
 constexpr char KEY_INTRA_REFRESH_PERIOD[] = "intra-refresh-period";
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractor.h b/media/libstagefright/include/media/stagefright/MediaExtractor.h
index 71343d5..6f3e57e 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractor.h
@@ -135,6 +135,22 @@
     CMediaExtractorV2 *plugin;
 };
 
+class MediaExtractorCUnwrapperV3 : public MediaExtractorCUnwrapper {
+public:
+    explicit MediaExtractorCUnwrapperV3(CMediaExtractorV3 *plugin);
+    virtual size_t countTracks();
+    virtual MediaTrack *getTrack(size_t index);
+    virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
+    virtual status_t getMetaData(MetaDataBase& meta);
+    virtual const char * name();
+    virtual uint32_t flags() const;
+    virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
+protected:
+    virtual ~MediaExtractorCUnwrapperV3();
+private:
+    CMediaExtractorV3 *plugin;
+};
+
 }  // namespace android
 
 #endif  // MEDIA_EXTRACTOR_H_
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index ef9f7ed..84e01f3 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -35,17 +35,21 @@
             const sp<DataSource> &source, const char *mime = NULL);
     static void LoadPlugins(const ::std::string& apkPath);
     static status_t dump(int fd, const Vector<String16>& args);
+    static void SetLinkedLibraries(const std::string& linkedLibraries);
 
 private:
     static Mutex gPluginMutex;
     static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
     static bool gPluginsRegistered;
     static bool gIgnoreVersion;
+    static std::string gLinkedLibraries;
 
     static void RegisterExtractorsInApk(
             const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractorsInSystem(
             const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
+    static void RegisterExtractorsInApex(
+            const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractor(
             const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
 
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index 66f4d72..69d6cde 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -49,6 +49,7 @@
         OUTPUT_FORMAT_WEBM        = 1,
         OUTPUT_FORMAT_THREE_GPP   = 2,
         OUTPUT_FORMAT_HEIF        = 3,
+        OUTPUT_FORMAT_OGG         = 4,
         OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
     };
 
diff --git a/media/libstagefright/include/media/stagefright/OggWriter.h b/media/libstagefright/include/media/stagefright/OggWriter.h
new file mode 100644
index 0000000..e3837cd
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/OggWriter.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OGG_WRITER_H_
+
+#define OGG_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+
+struct OggStreamState;
+
+namespace android {
+
+struct OggWriter : public MediaWriter {
+    OggWriter(int fd);
+
+    status_t initCheck() const;
+
+    virtual status_t addSource(const sp<MediaSource>& source);
+    virtual bool reachedEOS();
+    virtual status_t start(MetaData* params = NULL);
+    virtual status_t stop() { return reset(); }
+    virtual status_t pause();
+
+protected:
+    ~OggWriter();
+
+private:
+    int mFd;
+    status_t mInitCheck;
+    sp<MediaSource> mSource;
+    bool mStarted = false;
+    volatile bool mPaused = false;
+    volatile bool mResumed = false;
+    volatile bool mDone;
+    volatile bool mReachedEOS;
+    pthread_t mThread;
+    int64_t mSampleRate;
+    int64_t mEstimatedSizeBytes;
+    int64_t mEstimatedDurationUs;
+
+    static void* ThreadWrapper(void*);
+    status_t threadFunc();
+    bool exceedsFileSizeLimit();
+    bool exceedsFileDurationLimit();
+    status_t reset();
+
+    int32_t mCurrentPacketId;
+    OggStreamState* mOs = nullptr;
+
+    OggWriter(const OggWriter&);
+    OggWriter& operator=(const OggWriter&);
+};
+
+}  // namespace android
+
+#endif  // OGG_WRITER_H_
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index cf93fcf..590131e 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -121,6 +121,8 @@
     ATSParser *mParser;
     unsigned mProgramNumber;
     unsigned mProgramMapPID;
+    uint32_t mPMTVersion;
+    uint32_t mPMT_CRC;
     KeyedVector<unsigned, sp<Stream> > mStreams;
     bool mFirstPTSValid;
     uint64_t mFirstPTS;
@@ -143,6 +145,9 @@
     unsigned typeExt() const { return mStreamTypeExt; }
     unsigned pid() const { return mElementaryPID; }
     void setPID(unsigned pid) { mElementaryPID = pid; }
+    void setAudioPresentations(AudioPresentationCollection audioPresentations) {
+        mAudioPresentations = audioPresentations;
+    }
 
     void setCasInfo(
             int32_t systemId,
@@ -293,6 +298,8 @@
     : mParser(parser),
       mProgramNumber(programNumber),
       mProgramMapPID(programMapPID),
+      mPMTVersion(0xffffffff),
+      mPMT_CRC(0xffffffff),
       mFirstPTSValid(false),
       mFirstPTS(0),
       mLastRecoveredPTS(lastRecoveredPTS) {
@@ -480,7 +487,13 @@
 
     MY_LOGV("  program_number = %u", br->getBits(16));
     MY_LOGV("  reserved = %u", br->getBits(2));
-    MY_LOGV("  version_number = %u", br->getBits(5));
+    bool audioPresentationsChanged = false;
+    unsigned pmtVersion = br->getBits(5);
+    if (pmtVersion != mPMTVersion) {
+        audioPresentationsChanged = true;
+        mPMTVersion = pmtVersion;
+    }
+    MY_LOGV("  version_number = %u", pmtVersion);
     MY_LOGV("  current_next_indicator = %u", br->getBits(1));
     MY_LOGV("  section_number = %u", br->getBits(8));
     MY_LOGV("  last_section_number = %u", br->getBits(8));
@@ -661,7 +674,12 @@
     if (infoBytesRemaining != 0) {
         ALOGW("Section data remains unconsumed");
     }
-    MY_LOGV("  CRC = 0x%08x", br->getBits(32));
+    unsigned crc = br->getBits(32);
+    if (crc != mPMT_CRC) {
+        audioPresentationsChanged = true;
+        mPMT_CRC = crc;
+    }
+    MY_LOGV("  CRC = 0x%08x", crc);
 
     bool PIDsChanged = false;
     for (size_t i = 0; i < infos.size(); ++i) {
@@ -722,6 +740,10 @@
             isAddingScrambledStream |= info.mCADescriptor.mSystemID >= 0;
             mStreams.add(info.mPID, stream);
         }
+        else if (index >= 0 && mStreams.editValueAt(index)->isAudio()
+                 && audioPresentationsChanged) {
+            mStreams.editValueAt(index)->setAudioPresentations(info.mAudioPresentations);
+        }
     }
 
     if (isAddingScrambledStream) {
@@ -1732,6 +1754,7 @@
                 mSource->setFormat(mQueue->getFormat());
             }
             mSource->queueAccessUnit(accessUnit);
+            mSource->convertAudioPresentationInfoToMetadata(mAudioPresentations);
         }
 
         // Every access unit has a pesStartOffset queued in |mPesStartOffsets|.
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index 7e00eb8..e516cf1 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -25,9 +25,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     shared_libs: [
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 6250045..8a76de3 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -79,9 +79,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -115,9 +112,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
     cflags: ["-Wall", "-Werror"],
 }
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index c499c77..b187035 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -164,6 +164,8 @@
             "audio_decoder.ac3", "audio_encoder.ac3" },
         { MEDIA_MIMETYPE_AUDIO_EAC3,
             "audio_decoder.eac3", "audio_encoder.eac3" },
+        { MEDIA_MIMETYPE_AUDIO_EAC3_JOC,
+            "audio_decoder.eac3_joc", "audio_encoder.eac3_joc" },
         { MEDIA_MIMETYPE_AUDIO_AC4,
             "audio_decoder.ac4", "audio_encoder.ac4" },
         { MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
diff --git a/media/libstagefright/opus/Android.bp b/media/libstagefright/opus/Android.bp
new file mode 100644
index 0000000..c5086ec
--- /dev/null
+++ b/media/libstagefright/opus/Android.bp
@@ -0,0 +1,21 @@
+cc_library_shared {
+    name: "libstagefright_opus_common",
+    vendor_available: true,
+
+    export_include_dirs: ["include"],
+
+    srcs: ["OpusHeader.cpp"],
+
+    shared_libs: ["liblog"],
+
+    cflags: ["-Werror"],
+
+    sanitize: {
+        integer_overflow: true,
+        cfi: true,
+        diag: {
+            integer_overflow: true,
+            cfi: true,
+        },
+    },
+}
\ No newline at end of file
diff --git a/media/libstagefright/opus/OpusHeader.cpp b/media/libstagefright/opus/OpusHeader.cpp
new file mode 100644
index 0000000..e4a460c
--- /dev/null
+++ b/media/libstagefright/opus/OpusHeader.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftOpus"
+
+#include <cstring>
+#include <stdint.h>
+
+#include <log/log.h>
+
+#include "OpusHeader.h"
+
+namespace android {
+
+// Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies
+// mappings for up to 8 channels. This information is part of the Vorbis I
+// Specification:
+// http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
+constexpr int kMaxChannels = 8;
+
+constexpr uint8_t kOpusChannelMap[kMaxChannels][kMaxChannels] = {
+        {0},
+        {0, 1},
+        {0, 2, 1},
+        {0, 1, 2, 3},
+        {0, 4, 1, 2, 3},
+        {0, 4, 1, 2, 3, 5},
+        {0, 4, 1, 2, 3, 5, 6},
+        {0, 6, 1, 2, 3, 4, 5, 7},
+};
+
+// Opus always has a 48kHz output rate. This is true for all Opus, not just this
+// implementation.
+constexpr int kRate = 48000;
+// Size of the Opus header excluding optional mapping information.
+constexpr size_t kOpusHeaderSize = 19;
+// Offset to magic string that starts Opus header.
+constexpr size_t kOpusHeaderLabelOffset = 0;
+// Offset to Opus version in the Opus header.
+constexpr size_t kOpusHeaderVersionOffset = 8;
+// Offset to the channel count byte in the Opus header.
+constexpr size_t kOpusHeaderChannelsOffset = 9;
+// Offset to the pre-skip value in the Opus header.
+constexpr size_t kOpusHeaderSkipSamplesOffset = 10;
+// Offset to sample rate in the Opus header.
+constexpr size_t kOpusHeaderSampleRateOffset = 12;
+// Offset to the gain value in the Opus header.
+constexpr size_t kOpusHeaderGainOffset = 16;
+// Offset to the channel mapping byte in the Opus header.
+constexpr size_t kOpusHeaderChannelMappingOffset = 18;
+// Opus Header contains a stream map. The mapping values are in the header
+// beyond the always present |kOpusHeaderSize| bytes of data. The mapping
+// data contains stream count, coupling information, and per channel mapping
+// values:
+//   - Byte 0: Number of streams.
+//   - Byte 1: Number coupled.
+//   - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping
+//             values.
+// Offset to the number of streams in the Opus header.
+constexpr size_t kOpusHeaderNumStreamsOffset = 19;
+// Offset to the number of streams that are coupled in the Opus header.
+constexpr size_t kOpusHeaderNumCoupledStreamsOffset = 20;
+// Offset to the stream to channel mapping in the Opus header.
+constexpr size_t kOpusHeaderStreamMapOffset = 21;
+// Maximum packet size used in Xiph's opusdec.
+constexpr int kMaxOpusOutputPacketSizeSamples = 960 * 6;
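+
+// For illustration only (not used by the code): a minimal 19-byte stereo
+// "OpusHead", laid out against the offsets above, assuming a pre-skip of 3840
+// samples, a 48000 Hz input rate, 0 dB gain and channel mapping family 0:
+//   bytes  0..7   'O','p','u','s','H','e','a','d'     (kOpusHeaderLabelOffset)
+//   byte   8      0x01                                (kOpusHeaderVersionOffset)
+//   byte   9      0x02                                (kOpusHeaderChannelsOffset)
+//   bytes 10..11  0x00 0x0f  (3840, little-endian)    (kOpusHeaderSkipSamplesOffset)
+//   bytes 12..15  0x80 0xbb 0x00 0x00  (48000, LE)    (kOpusHeaderSampleRateOffset)
+//   bytes 16..17  0x00 0x00                           (kOpusHeaderGainOffset)
+//   byte  18      0x00                                (kOpusHeaderChannelMappingOffset)
+// With mapping family 1 (more than two channels) the header continues with the
+// stream count, coupled stream count and per-channel stream map described above.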
+
+// Default audio output channel layout. Used to initialize |stream_map| in
+// OpusHeader, and passed to opus_multistream_decoder_create() when the header
+// does not contain mapping information. The values are valid only for mono and
+// stereo output: Opus streams with more than 2 channels require a stream map.
+constexpr int kMaxChannelsWithDefaultLayout = 2;
+constexpr uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = {0, 1};
+
+static uint16_t ReadLE16(const uint8_t* data, size_t data_size, uint32_t read_offset) {
+    // check whether the 2nd byte is within the buffer
+    if (read_offset + 1 >= data_size) return 0;
+    uint16_t val;
+    val = data[read_offset];
+    val |= data[read_offset + 1] << 8;
+    return val;
+}
+
+// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
+bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header) {
+    if (data_size < kOpusHeaderSize) {
+        ALOGV("Header size is too small.");
+        return false;
+    }
+    header->channels = data[kOpusHeaderChannelsOffset];
+
+    if (header->channels < 1 || header->channels > kMaxChannels) {
+        ALOGV("Invalid Header, bad channel count: %d", header->channels);
+        return false;
+    }
+    header->skip_samples = ReadLE16(data, data_size, kOpusHeaderSkipSamplesOffset);
+    header->gain_db = static_cast<int16_t>(ReadLE16(data, data_size, kOpusHeaderGainOffset));
+    header->channel_mapping = data[kOpusHeaderChannelMappingOffset];
+    if (!header->channel_mapping) {
+        if (header->channels > kMaxChannelsWithDefaultLayout) {
+            ALOGV("Invalid Header, missing stream map.");
+            return false;
+        }
+        header->num_streams = 1;
+        header->num_coupled = header->channels > 1;
+        header->stream_map[0] = 0;
+        header->stream_map[1] = 1;
+        return true;
+    }
+    if (data_size < kOpusHeaderStreamMapOffset + header->channels) {
+        ALOGV("Invalid stream map; insufficient data for current channel "
+              "count: %d",
+              header->channels);
+        return false;
+    }
+    header->num_streams = data[kOpusHeaderNumStreamsOffset];
+    header->num_coupled = data[kOpusHeaderNumCoupledStreamsOffset];
+    if (header->num_streams + header->num_coupled != header->channels) {
+        ALOGV("Inconsistent channel mapping.");
+        return false;
+    }
+    for (int i = 0; i < header->channels; ++i)
+        header->stream_map[i] = data[kOpusHeaderStreamMapOffset + i];
+    return true;
+}
+
+int WriteOpusHeader(const OpusHeader &header, int input_sample_rate,
+                    uint8_t* output, size_t output_size) {
+    // See https://wiki.xiph.org/OggOpus#ID_Header.
+    const size_t total_size = kOpusHeaderStreamMapOffset + header.channels;
+    if (output_size < total_size) {
+        ALOGE("Output buffer too small for header.");
+        return -1;
+    }
+
+    // ensure entire header is cleared, even though we overwrite much of it below
+    memset(output, 0, output_size);
+
+    // Set magic signature.
+    memcpy(output + kOpusHeaderLabelOffset, "OpusHead", 8);
+    // Set Opus version.
+    output[kOpusHeaderVersionOffset] = 1;
+    // Set channel count.
+    output[kOpusHeaderChannelsOffset] = (uint8_t)header.channels;
+    // Set pre-skip
+    memcpy(output + kOpusHeaderSkipSamplesOffset, &header.skip_samples, sizeof(uint16_t));
+    // Set original input sample rate in Hz.
+    memcpy(output + kOpusHeaderSampleRateOffset, &input_sample_rate, sizeof(uint32_t));
+    // Set output gain in dB.
+    memcpy(output + kOpusHeaderGainOffset, &header.gain_db, sizeof(uint16_t));
+
+    if (header.channels > 2) {
+        // Set channel mapping
+        output[kOpusHeaderChannelMappingOffset] = 1;
+        // Assuming no coupled streams. This should actually be
+        // channels() - |coupled_streams|.
+        output[kOpusHeaderNumStreamsOffset] = header.channels;
+        output[kOpusHeaderNumCoupledStreamsOffset] = 0;
+
+        // Set the actual stream map.
+        for (int i = 0; i < header.channels; ++i) {
+            output[kOpusHeaderStreamMapOffset + i] = kOpusChannelMap[header.channels - 1][i];
+        }
+        return kOpusHeaderStreamMapOffset + header.channels + 1;
+    } else {
+        output[kOpusHeaderChannelMappingOffset] = 0;
+        return kOpusHeaderChannelMappingOffset + 1;
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/opus/include/OpusHeader.h b/media/libstagefright/opus/include/OpusHeader.h
new file mode 100644
index 0000000..f9f79cd
--- /dev/null
+++ b/media/libstagefright/opus/include/OpusHeader.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The Opus specification is part of IETF RFC 6716:
+ * http://tools.ietf.org/html/rfc6716
+ */
+
+#ifndef OPUS_HEADER_H_
+#define OPUS_HEADER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace android {
+
+struct OpusHeader {
+    int channels;
+    int channel_mapping;
+    int num_streams;
+    int num_coupled;
+    int16_t gain_db;
+    int skip_samples;
+    uint8_t stream_map[8];
+};
+
+bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
+int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
+}  // namespace android
+
+#endif  // OPUS_HEADER_H_
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index d1767d3..9bc9c89 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -46,9 +46,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -104,8 +101,5 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index 7c51333..97e1ec6 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -14,9 +14,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     include_dirs: [
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 97e1452..1f840b7 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -14,9 +14,6 @@
             "unsigned-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     srcs: [
@@ -31,6 +28,7 @@
 
     shared_libs: [
         "libstagefright_foundation",
+        "libstagefright_opus_common",
         "libutils",
         "liblog",
     ],
diff --git a/media/libstagefright/webm/WebmElement.cpp b/media/libstagefright/webm/WebmElement.cpp
index a5120b9..4d504e0 100644
--- a/media/libstagefright/webm/WebmElement.cpp
+++ b/media/libstagefright/webm/WebmElement.cpp
@@ -305,6 +305,7 @@
 }
 
 sp<WebmElement> WebmElement::AudioTrackEntry(
+        const char *codec,
         int chans,
         double rate,
         const sp<ABuffer> &buf,
@@ -322,7 +323,7 @@
             uid,
             lacing,
             lang,
-            "A_VORBIS",
+            codec,
             kAudioType,
             trackEntryFields);
 
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/WebmElement.h
index ffbba1b..a94c23f 100644
--- a/media/libstagefright/webm/WebmElement.h
+++ b/media/libstagefright/webm/WebmElement.h
@@ -50,6 +50,7 @@
     static sp<WebmElement> SegmentInfo(uint64_t scale = 1000000, double dur = 0);
 
     static sp<WebmElement> AudioTrackEntry(
+            const char *codec,
             int chans,
             double rate,
             const sp<ABuffer> &buf,
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 4d73eb8..7b4b23a 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -23,6 +23,8 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <OpusHeader.h>
 
 #include <utils/Errors.h>
 
@@ -112,46 +114,102 @@
 // static
 sp<WebmElement> WebmWriter::audioTrack(const sp<MetaData>& md) {
     int32_t nChannels, samplerate;
-    uint32_t type;
-    const void *headerData1;
-    const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
-            'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
-    const void *headerData3;
-    size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
+    const char* mimeType;
 
     if (!md->findInt32(kKeyChannelCount, &nChannels)
-            || !md->findInt32(kKeySampleRate, &samplerate)
-            || !md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
-            || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+        || !md->findInt32(kKeySampleRate, &samplerate)
+        || !md->findCString(kKeyMIMEType, &mimeType)) {
         ALOGE("Missing format keys for audio track");
         md->dumpToLog();
         return NULL;
     }
 
-    size_t codecPrivateSize = 1;
-    codecPrivateSize += XiphLaceCodeLen(headerSize1);
-    codecPrivateSize += XiphLaceCodeLen(headerSize2);
-    codecPrivateSize += headerSize1 + headerSize2 + headerSize3;
+    if (!strncasecmp(mimeType, MEDIA_MIMETYPE_AUDIO_OPUS, strlen(MEDIA_MIMETYPE_AUDIO_OPUS))) {
+        // Opus in WebM is a well-known, yet under-documented, format. The codec private data
+        // of the track is an Opus Ogg header (https://tools.ietf.org/html/rfc7845#section-5.1).
+        // The track name isn't standardized; its value should be "A_OPUS".
+        OpusHeader header;
+        header.channels = nChannels;
+        header.num_streams = nChannels;
+        header.num_coupled = 0;
+        // - Channel mapping family (8 bits unsigned)
+        //  --  0 = one stream: mono or L,R stereo
+        //  --  1 = channels in vorbis spec order: mono or L,R stereo or ... or FL,C,FR,RL,RR,LFE, ...
+        //  --  2..254 = reserved (treat as 255)
+        //  --  255 = no defined channel meaning
+        //
+        //  Our implementation encodes 0, 1, or 255; see the worked values below.
+        header.channel_mapping = ((nChannels > 8) ? 255 : (nChannels > 2));
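+        // (worked values: 1 or 2 channels -> family 0, 3..8 channels -> family 1,
+        //  more than 8 channels -> family 255)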
+        header.gain_db = 0;
+        header.skip_samples = 0;
 
-    off_t off = 0;
-    sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
-    uint8_t *codecPrivateData = codecPrivateBuf->data();
-    codecPrivateData[off++] = 2;
+        // The header is 21 bytes plus a little more driven by the channel count;
+        // expect sizes in the low 30s here. WriteOpusHeader() will tell us
+        // if things are bad.
+        unsigned char header_data[100];
+        int headerSize = WriteOpusHeader(header, samplerate, (uint8_t*)header_data,
+                                            sizeof(header_data));
 
-    off += XiphLaceEnc(codecPrivateData + off, headerSize1);
-    off += XiphLaceEnc(codecPrivateData + off, headerSize2);
+        if (headerSize < 0) {
+            // didn't fill out that header for some reason
+            ALOGE("failed to generate OPUS header");
+            return NULL;
+        }
 
-    memcpy(codecPrivateData + off, headerData1, headerSize1);
-    off += headerSize1;
-    memcpy(codecPrivateData + off, headerData2, headerSize2);
-    off += headerSize2;
-    memcpy(codecPrivateData + off, headerData3, headerSize3);
+        size_t codecPrivateSize = 0;
+        codecPrivateSize += headerSize;
 
-    sp<WebmElement> entry = WebmElement::AudioTrackEntry(
-            nChannels,
-            samplerate,
-            codecPrivateBuf);
-    return entry;
+        off_t off = 0;
+        sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
+        uint8_t* codecPrivateData = codecPrivateBuf->data();
+
+        memcpy(codecPrivateData + off, (uint8_t*)header_data, headerSize);
+        sp<WebmElement> entry =
+                WebmElement::AudioTrackEntry("A_OPUS", nChannels, samplerate, codecPrivateBuf);
+        return entry;
+    } else if (!strncasecmp(mimeType,
+                            MEDIA_MIMETYPE_AUDIO_VORBIS,
+                            strlen(MEDIA_MIMETYPE_AUDIO_VORBIS))) {
+        uint32_t type;
+        const void *headerData1;
+        const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
+                'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
+        const void *headerData3;
+        size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
+
+        if (!md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
+            || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+            ALOGE("Missing header format keys for vorbis track");
+            md->dumpToLog();
+            return NULL;
+        }
+
+        size_t codecPrivateSize = 1;
+        codecPrivateSize += XiphLaceCodeLen(headerSize1);
+        codecPrivateSize += XiphLaceCodeLen(headerSize2);
+        codecPrivateSize += headerSize1 + headerSize2 + headerSize3;
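+        // Xiph lacing: each laced size is stored as size/255 bytes of 0xff followed by
+        // one byte holding size%255, hence the XiphLaceCodeLen() terms above.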
+
+        off_t off = 0;
+        sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
+        uint8_t *codecPrivateData = codecPrivateBuf->data();
+        codecPrivateData[off++] = 2;
+
+        off += XiphLaceEnc(codecPrivateData + off, headerSize1);
+        off += XiphLaceEnc(codecPrivateData + off, headerSize2);
+
+        memcpy(codecPrivateData + off, headerData1, headerSize1);
+        off += headerSize1;
+        memcpy(codecPrivateData + off, headerData2, headerSize2);
+        off += headerSize2;
+        memcpy(codecPrivateData + off, headerData3, headerSize3);
+
+        sp<WebmElement> entry =
+                WebmElement::AudioTrackEntry("A_VORBIS", nChannels, samplerate, codecPrivateBuf);
+        return entry;
+    } else {
+        ALOGE("Track (%s) is not a supported audio format", mimeType);
+        return NULL;
+    }
 }
 
 size_t WebmWriter::numTracks() {
@@ -382,16 +440,18 @@
     const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
     const char *vp9 = MEDIA_MIMETYPE_VIDEO_VP9;
     const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;
+    const char* opus = MEDIA_MIMETYPE_AUDIO_OPUS;
 
     size_t streamIndex;
     if (!strncasecmp(mime, vp8, strlen(vp8)) ||
         !strncasecmp(mime, vp9, strlen(vp9))) {
         streamIndex = kVideoIndex;
-    } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
+    } else if (!strncasecmp(mime, vorbis, strlen(vorbis)) ||
+               !strncasecmp(mime, opus, strlen(opus))) {
         streamIndex = kAudioIndex;
     } else {
-        ALOGE("Track (%s) other than %s, %s or %s is not supported",
-              mime, vp8, vp9, vorbis);
+        ALOGE("Track (%s) other than %s, %s, %s, or %s is not supported",
+              mime, vp8, vp9, vorbis, opus);
         return ERROR_UNSUPPORTED;
     }
 
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index 5737ac2..b55dbb0 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -36,9 +36,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index 992dc9a..5dbcd08 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -561,7 +561,7 @@
     return processedBytes == mPacketSize ? processedBytes : -1;
 }
 
-int MtpDataPacket::write(struct usb_request *request,
+int64_t MtpDataPacket::write(struct usb_request *request,
                          UrbPacketDivisionMode divisionMode,
                          int fd,
                          size_t payloadSize) {
diff --git a/media/mtp/MtpDataPacket.h b/media/mtp/MtpDataPacket.h
index 1ddb821..138f7b0 100644
--- a/media/mtp/MtpDataPacket.h
+++ b/media/mtp/MtpDataPacket.h
@@ -117,7 +117,8 @@
     int                 write(struct usb_request *request, UrbPacketDivisionMode divisionMode);
     // Similar to previous write method but it reads the payload from |fd|. If |size| is larger than
     // MTP_BUFFER_SIZE, the data will be sent by multiple bulk transfer requests.
-    int                 write(struct usb_request *request, UrbPacketDivisionMode divisionMode,
+    // The return type is int64_t because |size| (and hence the number of bytes written) can exceed 2GB.
+    int64_t             write(struct usb_request *request, UrbPacketDivisionMode divisionMode,
                               int fd, size_t size);
 #endif
 
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index 993797a..9665c58 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -516,7 +516,7 @@
     return (MtpObjectHandle)-1;
 }
 
-bool MtpDevice::sendObject(MtpObjectHandle handle, int size, int srcFD) {
+bool MtpDevice::sendObject(MtpObjectHandle handle, uint32_t size, int srcFD) {
     std::lock_guard<std::mutex> lg(mMutex);
 
     if (mLastSendObjectInfoTransactionID + 1 != mTransactionID ||
@@ -529,7 +529,7 @@
     if (sendRequest(MTP_OPERATION_SEND_OBJECT)) {
         mData.setOperationCode(mRequest.getOperationCode());
         mData.setTransactionID(mRequest.getTransactionID());
-        const int writeResult = mData.write(mRequestOut, mPacketDivisionMode, srcFD, size);
+        const int64_t writeResult = mData.write(mRequestOut, mPacketDivisionMode, srcFD, size);
         const MtpResponseCode ret = readResponse();
         return ret == MTP_RESPONSE_OK && writeResult > 0;
     }
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index 8cf9e5e..01bc3db 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -104,7 +104,7 @@
     MtpObjectInfo*          getObjectInfo(MtpObjectHandle handle);
     void*                   getThumbnail(MtpObjectHandle handle, int& outLength);
     MtpObjectHandle         sendObjectInfo(MtpObjectInfo* info);
-    bool                    sendObject(MtpObjectHandle handle, int size, int srcFD);
+    bool                    sendObject(MtpObjectHandle handle, uint32_t size, int srcFD);
     bool                    deleteObject(MtpObjectHandle handle);
     MtpObjectHandle         getParent(MtpObjectHandle handle);
     MtpStorageID            getStorageID(MtpObjectHandle handle);
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 541ba3e..6976950 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -61,7 +61,6 @@
     ],
 
     cflags: [
-        "-fvisibility=hidden",
         "-DEXPORT=__attribute__((visibility(\"default\")))",
         "-Werror",
         "-Wall",
@@ -104,6 +103,11 @@
             enabled: false,
         },
     },
+    version_script: "libmediandk.map.txt",
+    stubs: {
+        symbol_file: "libmediandk.map.txt",
+        versions: ["29"],
+    },
 }
 
 llndk_library {
@@ -149,9 +153,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index bf9725c..e0af80d 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -273,6 +273,9 @@
 EXPORT const char* AMEDIAFORMAT_KEY_ALBUMARTIST = "albumartist";
 EXPORT const char* AMEDIAFORMAT_KEY_ARTIST = "artist";
 EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO = "audio-presentation-info";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PRESENTATION_ID =
+        "audio-presentation-presentation-id";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PROGRAM_ID = "audio-presentation-program-id";
 EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
 EXPORT const char* AMEDIAFORMAT_KEY_AUTHOR = "author";
 EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
@@ -320,6 +323,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
 EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
 EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO = "hdr10-plus-info";
 EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
 EXPORT const char* AMEDIAFORMAT_KEY_ICC_PROFILE = "icc-profile";
 EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
@@ -327,6 +331,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
 EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
 EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
+EXPORT const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME = "is-sync-frame";
 EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
 EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
 EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index 9dc120d..b3ee853 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -241,12 +241,6 @@
 AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
- * Get format of the buffer. The specified buffer index must have been previously obtained from
- * dequeueOutputBuffer.
- */
-AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(21);
-
-/**
  * If you are done with a buffer, use this call to return the buffer to
  * the codec. If you previously specified a surface when configuring this
  * video decoder you can optionally render the buffer.
@@ -353,6 +347,12 @@
 #if __ANDROID_API__ >= 28
 
 /**
+ * Get format of the buffer. The specified buffer index must have been previously obtained from
+ * dequeueOutputBuffer.
+ */
+AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(28);
+
+/**
  * Get the component name. If the codec was created by createDecoderByType
  * or createEncoderByType, what component is chosen is not known beforehand.
  * Caller shall call AMediaCodec_releaseName to free the returned pointer.
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 658cbac..2cd1d04 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -181,12 +181,11 @@
 extern const char* AMEDIAFORMAT_KEY_ALBUMARTIST __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_ARTIST __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PRESENTATION_ID __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PROGRAM_ID __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_AUTHOR __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_BITS_PER_SAMPLE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_CDTRACKNUMBER __INTRODUCED_IN(29);
-extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE __INTRODUCED_IN(29);
-extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD __INTRODUCED_IN(29);
-extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_COMPILATION __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_COMPOSER __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE __INTRODUCED_IN(29);
@@ -207,8 +206,8 @@
 extern const char* AMEDIAFORMAT_KEY_EXIF_SIZE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_FRAME_COUNT __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_GENRE __INTRODUCED_IN(29);
-extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_ICC_PROFILE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_LOOP __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_LYRICIST __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d3bdbae..3567899 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -35,64 +35,110 @@
     AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_PROFILE; # var introduced=21
     AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+    AMEDIAFORMAT_KEY_ALBUM; # var introduced=29
+    AMEDIAFORMAT_KEY_ALBUMART; # var introduced=29
+    AMEDIAFORMAT_KEY_ALBUMARTIST; # var introduced=29
+    AMEDIAFORMAT_KEY_ARTIST; # var introduced=29
+    AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO; # var introduced=29
     AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+    AMEDIAFORMAT_KEY_AUTHOR; # var introduced=29
     AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
     AMEDIAFORMAT_KEY_BIT_RATE; # var introduced=21
+    AMEDIAFORMAT_KEY_BITS_PER_SAMPLE; # var introduced=29
     AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
+    AMEDIAFORMAT_KEY_CDTRACKNUMBER; # var introduced=29
     AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var introduced=21
     AMEDIAFORMAT_KEY_CHANNEL_MASK; # var introduced=21
     AMEDIAFORMAT_KEY_COLOR_FORMAT; # var introduced=21
     AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+    AMEDIAFORMAT_KEY_COMPILATION; # var introduced=29
     AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+    AMEDIAFORMAT_KEY_COMPOSER; # var introduced=29
+    AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE; # var introduced=29
+    AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK; # var introduced=29
+    AMEDIAFORMAT_KEY_CRYPTO_IV; # var introduced=29
+    AMEDIAFORMAT_KEY_CRYPTO_KEY; # var introduced=29
+    AMEDIAFORMAT_KEY_CRYPTO_MODE; # var introduced=29
+    AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK; # var introduced=29
     AMEDIAFORMAT_KEY_CSD; # var introduced=28
     AMEDIAFORMAT_KEY_CSD_0; # var introduced=28
     AMEDIAFORMAT_KEY_CSD_1; # var introduced=28
     AMEDIAFORMAT_KEY_CSD_2; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_AVC; # var introduced=29
+    AMEDIAFORMAT_KEY_CSD_HEVC; # var introduced=29
+    AMEDIAFORMAT_KEY_D263; # var introduced=29
+    AMEDIAFORMAT_KEY_DATE; # var introduced=29
+    AMEDIAFORMAT_KEY_DISCNUMBER; # var introduced=29
     AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
     AMEDIAFORMAT_KEY_DISPLAY_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_DISPLAY_WIDTH; # var introduced=28
     AMEDIAFORMAT_KEY_DURATION; # var introduced=21
+    AMEDIAFORMAT_KEY_ENCODER_DELAY; # var introduced=29
+    AMEDIAFORMAT_KEY_ENCODER_PADDING; # var introduced=29
+    AMEDIAFORMAT_KEY_ESDS; # var introduced=29
+    AMEDIAFORMAT_KEY_EXIF_OFFSET; # var introduced=29
+    AMEDIAFORMAT_KEY_EXIF_SIZE; # var introduced=29
+    AMEDIAFORMAT_KEY_FRAME_COUNT; # var introduced=29
     AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var introduced=21
     AMEDIAFORMAT_KEY_FRAME_RATE; # var introduced=21
+    AMEDIAFORMAT_KEY_GENRE; # var introduced=29
     AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
     AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
     AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
     AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
+    AMEDIAFORMAT_KEY_ICC_PROFILE; # var introduced=29
     AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
     AMEDIAFORMAT_KEY_IS_ADTS; # var introduced=21
     AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var introduced=21
     AMEDIAFORMAT_KEY_IS_DEFAULT; # var introduced=21
     AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_SYNC_FRAME; # var introduced=29
     AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
     AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
     AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
     AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
+    AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
+    AMEDIAFORMAT_KEY_LOOP; # var introduced=29
+    AMEDIAFORMAT_KEY_LYRICIST; # var introduced=29
+    AMEDIAFORMAT_KEY_MAX_BIT_RATE; # var introduced=29
     AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
     AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
     AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
     AMEDIAFORMAT_KEY_MIME; # var introduced=21
     AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
+    AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER; # var introduced=29
     AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+    AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN; # var introduced=29
     AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
     AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
     AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
+    AMEDIAFORMAT_KEY_PSSH; # var introduced=29
     AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
     AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
     AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
     AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
+    AMEDIAFORMAT_KEY_SAR_HEIGHT; # var introduced=29
+    AMEDIAFORMAT_KEY_SAR_WIDTH; # var introduced=29
     AMEDIAFORMAT_KEY_SEI; # var introduced=28
     AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
+    AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT; # var introduced=29
     AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID; # var introduced=28
     AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+    AMEDIAFORMAT_KEY_TEXT_FORMAT_DATA; # var introduced=29
+    AMEDIAFORMAT_KEY_THUMBNAIL_HEIGHT; # var introduced=29
+    AMEDIAFORMAT_KEY_THUMBNAIL_TIME; # var introduced=29
+    AMEDIAFORMAT_KEY_THUMBNAIL_WIDTH; # var introduced=29
+    AMEDIAFORMAT_KEY_TITLE; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
     AMEDIAFORMAT_KEY_TIME_US; # var introduced=28
     AMEDIAFORMAT_KEY_TRACK_INDEX; # var introduced=28
     AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
     AMEDIAFORMAT_KEY_WIDTH; # var introduced=21
+    AMEDIAFORMAT_KEY_YEAR; # var introduced=29
     AMediaCodecActionCode_isRecoverable; # introduced=28
     AMediaCodecActionCode_isTransient; # introduced=28
     AMediaCodecCryptoInfo_delete;
@@ -112,6 +158,7 @@
     AMediaCodec_dequeueInputBuffer;
     AMediaCodec_dequeueOutputBuffer;
     AMediaCodec_flush;
+    AMediaCodec_getBufferFormat; # introduced=28
     AMediaCodec_getInputBuffer;
     AMediaCodec_getInputFormat; # introduced=28
     AMediaCodec_getName; # introduced=28
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaController.java b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
index 1f29185..60f74ab 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaController.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
@@ -149,12 +149,9 @@
         if (keyEvent == null) {
             throw new IllegalArgumentException("KeyEvent may not be null");
         }
-        //TODO(b/119789707): Resolve hidden API usage: KeyEvent#isMediaKey
-        /*
-        if (!KeyEvent.isMediaKey(keyEvent.getKeyCode())) {
+        if (!KeyEvent.isMediaSessionKey(keyEvent.getKeyCode())) {
             return false;
         }
-        */
         try {
             //TODO(b/119748678): Resolve mContext.getOpPackageName() through this file.
             // Temporarilly it's replaced with "mContext.getOpPackageName()" for compiling.
@@ -199,9 +196,8 @@
             }
 
             case KeyEvent.ACTION_UP: {
-                //TODO(b/119790339): Resolve hidden API usage. AudioManager.FLAG_FROM_KEY
-                final int flags = AudioManager.FLAG_PLAY_SOUND | AudioManager.FLAG_VIBRATE;
-                        //| AudioManager.FLAG_FROM_KEY;
+                final int flags = AudioManager.FLAG_PLAY_SOUND | AudioManager.FLAG_VIBRATE
+                        | AudioManager.FLAG_FROM_KEY;
                 try {
                     mSessionBinder.adjustVolume("mContext.getOpPackageName()", mCbStub, true, 0,
                             flags);
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
index deb0203..1ae1d2c 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
@@ -1070,12 +1070,8 @@
 
         private static RemoteUserInfo createRemoteUserInfo(String packageName, int pid, int uid,
                 ISessionControllerCallback caller) {
-            //TODO(b/119752205): Resolve hidden API usage. 4-param constructor of RemoteUserInfo
-            /*
             return new RemoteUserInfo(packageName, pid, uid,
                     caller != null ? caller.asBinder() : null);
-            */
-            return new RemoteUserInfo(packageName, pid, uid);
         }
 
         @Override
diff --git a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
index 2f2fbaf..fa7696e 100644
--- a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
+++ b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
@@ -544,12 +544,8 @@
             throw new IllegalStateException("This should be called inside of onGetRoot or"
                     + " onLoadChildren or onLoadItem methods");
         }
-        //TODO(b/119752205): Resolve hidden API usage. 4-param constructor of RemoteUserInfo
-        /*
         return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid,
                 mCurConnection.callbacks.asBinder());
-        */
-        return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid);
     }
 
     /**
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 06975ac..26f76c0 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -746,7 +746,7 @@
         output.afFrameCount = thread->frameCount();
         output.afSampleRate = thread->sampleRate();
         output.afLatencyMs = thread->latency();
-        output.trackId = track == nullptr ? -1 : track->id();
+        output.portId = portId;
 
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
@@ -1766,7 +1766,7 @@
 
     output.cblk = recordTrack->getCblk();
     output.buffers = recordTrack->getBuffers();
-    output.trackId = recordTrack->id();
+    output.portId = portId;
 
     // return handle to client
     recordHandle = new RecordHandle(recordTrack);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index b6b3815..6c698f6 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -365,16 +365,17 @@
     static inline bool isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) {
         switch (audio_channel_mask_get_representation(channelMask)) {
         case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
-            uint32_t channelCount = FCC_2; // stereo is default
-            if (kEnableExtendedChannels) {
-                channelCount = audio_channel_count_from_out_mask(channelMask);
-                if (channelCount < FCC_2 // mono is not supported at this time
-                        || channelCount > AudioMixer::MAX_NUM_CHANNELS) {
-                    return false;
-                }
+            // Haptic channel mask is only applicable for channel position mask.
+            const uint32_t channelCount = audio_channel_count_from_out_mask(
+                    channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
+            const uint32_t maxChannelCount = kEnableExtendedChannels
+                    ? AudioMixer::MAX_NUM_CHANNELS : FCC_2;
+            if (channelCount < FCC_2 // mono is not supported at this time
+                    || channelCount > maxChannelCount) {
+                return false;
             }
             // check that channelMask is the "canonical" one we expect for the channelCount.
-            return channelMask == audio_channel_out_mask_from_count(channelCount);
+            return audio_channel_position_mask_is_out_canonical(channelMask);
             }
         case AUDIO_CHANNEL_REPRESENTATION_INDEX:
             if (kEnableExtendedChannels) {
@@ -553,6 +554,7 @@
         virtual void        pause();
         virtual status_t    attachAuxEffect(int effectId);
         virtual status_t    setParameters(const String8& keyValuePairs);
+        virtual status_t    selectPresentation(int presentationId, int programId);
         virtual media::VolumeShaper::Status applyVolumeShaper(
                 const sp<media::VolumeShaper::Configuration>& configuration,
                 const sp<media::VolumeShaper::Operation>& operation) override;
diff --git a/services/audioflinger/BufLog.cpp b/services/audioflinger/BufLog.cpp
index ae96036..5f6aca0 100644
--- a/services/audioflinger/BufLog.cpp
+++ b/services/audioflinger/BufLog.cpp
@@ -115,7 +115,7 @@
         unsigned int samplingRate,
         size_t maxBytes = 0) : mId(id), mFormat(format), mChannels(channels),
                 mSamplingRate(samplingRate), mMaxBytes(maxBytes) {
-    mByteCount = 0l;
+    mByteCount = 0;
     mPaused = false;
     if (tag != NULL) {
         (void)audio_utils_strlcpy(mTag, tag);
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index d15841f..f328577 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -37,8 +37,9 @@
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 #endif
-#include <audio_utils/mono_blend.h>
+#include <audio_utils/channels.h>
 #include <audio_utils/format.h>
+#include <audio_utils/mono_blend.h>
 #include <media/AudioMixer.h>
 #include "FastMixer.h"
 #include "TypedLogger.h"
@@ -159,20 +160,24 @@
     if (current->mOutputSinkGen != mOutputSinkGen) {
         mOutputSink = current->mOutputSink;
         mOutputSinkGen = current->mOutputSinkGen;
+        mSinkChannelMask = current->mSinkChannelMask;
         if (mOutputSink == NULL) {
             mFormat = Format_Invalid;
             mSampleRate = 0;
             mSinkChannelCount = 0;
             mSinkChannelMask = AUDIO_CHANNEL_NONE;
+            mAudioChannelCount = 0;
         } else {
             mFormat = mOutputSink->format();
             mSampleRate = Format_sampleRate(mFormat);
             mSinkChannelCount = Format_channelCount(mFormat);
             LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
 
-            // TODO: Add channel mask to NBAIO_Format
-            // We assume that the channel mask must be a valid positional channel mask.
-            mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+            if (mSinkChannelMask == AUDIO_CHANNEL_NONE) {
+                mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+            }
+            mAudioChannelCount = mSinkChannelCount - audio_channel_count_from_out_mask(
+                    mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
         }
         dumpState->mSampleRate = mSampleRate;
     }
@@ -288,6 +293,8 @@
                         (void *)(uintptr_t)fastTrack->mChannelMask);
                 mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
                         (void *)(uintptr_t)mSinkChannelMask);
+                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+                        (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
                 mMixer->enable(name);
             }
             mGenerations[i] = fastTrack->mGeneration;
@@ -324,6 +331,8 @@
                             (void *)(uintptr_t)fastTrack->mChannelMask);
                     mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
                             (void *)(uintptr_t)mSinkChannelMask);
+                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+                            (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
                     // already enabled
                 }
                 mGenerations[i] = fastTrack->mGeneration;
@@ -468,6 +477,13 @@
             memcpy_by_audio_format(buffer, mFormat.mFormat, mMixerBuffer, mMixerBufferFormat,
                     frameCount * Format_channelCount(mFormat));
         }
+        if (mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
+            // When there are haptic channels, the sample data is partially interleaved.
+            // Make the sample data fully interleaved here.
+            adjust_channels_non_destructive(buffer, mAudioChannelCount, buffer, mSinkChannelCount,
+                    audio_bytes_per_sample(mFormat.mFormat),
+                    frameCount * audio_bytes_per_frame(mAudioChannelCount, mFormat.mFormat));
+        }
         // if non-NULL, then duplicate write() to this non-blocking sink
 #ifdef TEE_SINK
         mTee.write(buffer, frameCount);
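
For context on the adjust_channels_non_destructive() call above: "partially interleaved" means the haptic frames sit in a block after the audio frames, and the call weaves them back into each frame so the sink sees fully interleaved data. A minimal sketch of that expansion over hypothetical int16_t buffers follows; the real audio_utils helper also works in place, whereas this copy-based version is for illustration only.

    #include <cstddef>
    #include <cstdint>

    // Input layout (partially interleaved): [all audio frames][all haptic frames]
    // Output layout (fully interleaved):    [audio of frame 0, haptic of frame 0][frame 1]...
    void interleaveHapticSketch(const int16_t* in, int16_t* out,
                                size_t frameCount, size_t audioCh, size_t hapticCh) {
        const int16_t* audio  = in;                         // audio block starts at the beginning
        const int16_t* haptic = in + frameCount * audioCh;  // haptic block is appended after it
        for (size_t f = 0; f < frameCount; ++f) {
            for (size_t c = 0; c < audioCh; ++c) {
                *out++ = audio[f * audioCh + c];
            }
            for (size_t c = 0; c < hapticCh; ++c) {
                *out++ = haptic[f * hapticCh + c];
            }
        }
    }
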
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 1c86d9a..1d332e0 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -76,6 +76,8 @@
     size_t          mMixerBufferSize;
     audio_format_t  mMixerBufferFormat; // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
 
+    uint32_t        mAudioChannelCount; // audio channel count, excludes haptic channels.
+
     enum {UNDEFINED, MIXED, ZEROED} mMixerBufferState;
     NBAIO_Format    mFormat;
     unsigned        mSampleRate;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index c7fcbd8..9d2a733 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -47,6 +47,7 @@
     audio_channel_mask_t    mChannelMask;    // AUDIO_CHANNEL_OUT_MONO or AUDIO_CHANNEL_OUT_STEREO
     audio_format_t          mFormat;         // track format
     int                     mGeneration;     // increment when any field is assigned
+    bool                    mHapticPlaybackEnabled = false; // whether haptic playback is enabled
 };
 
 // Represents a single state of the fast mixer
@@ -69,6 +70,9 @@
     NBAIO_Sink* mOutputSink;    // HAL output device, must already be negotiated
     int         mOutputSinkGen; // increment when mOutputSink is assigned
     size_t      mFrameCount;    // number of frames per fast mix buffer
+    audio_channel_mask_t mSinkChannelMask; // If not AUDIO_CHANNEL_NONE, specifies sink channel
+                                           // mask when it cannot be directly calculated from
+                                           // channel count
 
     // Extends FastThreadState::Command
     static const Command
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 53ea9a4..d9f570d 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -67,6 +67,7 @@
             bool        isStatic() const { return  mSharedBuffer.get() != nullptr; }
 
             status_t    setParameters(const String8& keyValuePairs);
+            status_t    selectPresentation(int presentationId, int programId);
             status_t    attachAuxEffect(int EffectId);
             void        setAuxBuffer(int EffectId, int32_t *buffer);
             int32_t     *auxBuffer() const { return mAuxBuffer; }
@@ -111,6 +112,14 @@
     /** Copy the track metadata in the provided iterator. Thread safe. */
     virtual void    copyMetadataTo(MetadataInserter& backInserter) const;
 
+            /** Return whether haptic playback of the track is enabled. Used in the mixer. */
+            bool    getHapticPlaybackEnabled() const { return mHapticPlaybackEnabled; }
+            /** Set whether haptic playback of the track is enabled. Should be
+             *  set after querying or receiving a callback from the vibrator service. */
+            void    setHapticPlaybackEnabled(bool hapticPlaybackEnabled) {
+                mHapticPlaybackEnabled = hapticPlaybackEnabled;
+            }
+
 protected:
     // for numerous
     friend class PlaybackThread;
@@ -187,6 +196,8 @@
 
     sp<media::VolumeHandler>  mVolumeHandler; // handles multiple VolumeShaper configs and operations
 
+    bool                mHapticPlaybackEnabled = false; // whether haptic playback is enabled
+
 private:
     // The following fields are only for fast tracks, and should be in a subclass
     int                 mFastIndex; // index within FastMixerState::mFastTracks[];
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index b0c9fda..85f5456 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -71,6 +71,12 @@
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
+    static  bool        checkServerLatencySupported(
+                                audio_format_t format, audio_input_flags_t flags) {
+                            return audio_is_linear_pcm(format)
+                                    && (flags & AUDIO_INPUT_FLAG_HW_AV_SYNC) == 0;
+                        }
+
 private:
     friend class AudioFlinger;  // for mState
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index f833cf7..c6941c0 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -38,6 +38,7 @@
 
 #include <private/media/AudioTrackShared.h>
 #include <private/android_filesystem_config.h>
+#include <audio_utils/channels.h>
 #include <audio_utils/mono_blend.h>
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
@@ -751,6 +752,7 @@
             audio_channel_mask_get_representation(mask);
 
     switch (representation) {
+    // Traverse all single-bit channel masks to convert the channel mask to a string.
     case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
         if (output) {
             if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
@@ -773,6 +775,8 @@
             if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
             if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
             if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
+            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, " );
+            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, " );
             if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
         } else {
             if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
@@ -1845,6 +1849,10 @@
     dumpBase(fd, args);
 
     dprintf(fd, "  Master mute: %s\n", mMasterMute ? "on" : "off");
+    if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+        dprintf(fd, "  Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
+                channelMaskToString(mHapticChannelMask, true /* output */).c_str());
+    }
     dprintf(fd, "  Normal frame count: %zu\n", mNormalFrameCount);
     dprintf(fd, "  Last write occurred (msecs): %llu\n",
             (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
@@ -1946,7 +1954,7 @@
             audio_is_linear_pcm(format) &&
             // TODO: extract as a data library function that checks that a computationally
             // expensive downmixer is not required: isFastOutputChannelConversion()
-            (channelMask == mChannelMask ||
+            (channelMask == (mChannelMask | mHapticChannelMask) ||
                     mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
                     (channelMask == AUDIO_CHANNEL_OUT_MONO
                             /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
@@ -2348,6 +2356,17 @@
                     track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
         }
 
+        // When haptic playback is supported and the track contains haptic channels, disable
+        // haptic playback for all other active tracks and enable it for the current track.
+        // TODO: Request actual haptic playback status from vibrator service
+        if ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+                && mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+            for (auto &t : mActiveTracks) {
+                t->setHapticPlaybackEnabled(false);
+            }
+            track->setHapticPlaybackEnabled(true);
+        }
+
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
         mActiveTracks.add(track);
@@ -2413,6 +2432,14 @@
     return String8();
 }
 
+status_t AudioFlinger::DirectOutputThread::selectPresentation(int presentationId, int programId) {
+    Mutex::Autolock _l(mLock);
+    if (mOutput == nullptr || mOutput->stream == nullptr) {
+        return NO_INIT;
+    }
+    return mOutput->stream->selectPresentation(presentationId, programId);
+}
+
 void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
     sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
     ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
@@ -2627,6 +2654,11 @@
         (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
     }
 
+    mHapticChannelMask = mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+    mChannelMask &= ~mHapticChannelMask;
+    mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
+    mChannelCount -= mHapticChannelCount;
+
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
     // Note that mLock is not held when readOutputParameters_l() is called from the constructor
@@ -2999,7 +3031,7 @@
         // Only one effect chain can be present in direct output thread and it uses
         // the sink buffer as input
         if (mType != DIRECT) {
-            size_t numSamples = mNormalFrameCount * mChannelCount;
+            size_t numSamples = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
             status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
                     numSamples * sizeof(effect_buffer_t),
                     &halInBuffer);
@@ -3498,7 +3530,17 @@
                 }
 
                 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
-                        mNormalFrameCount * mChannelCount);
+                        mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+
+                // If we're going directly to the sink and there are haptic channels,
+                // we should adjust channels as the sample data is partially interleaved
+                // in this case.
+                if (!mEffectBufferValid && mHapticChannelCount > 0) {
+                    adjust_channels_non_destructive(buffer, mChannelCount, buffer,
+                            mChannelCount + mHapticChannelCount,
+                            audio_bytes_per_sample(format),
+                            audio_bytes_per_frame(mChannelCount, format) * mNormalFrameCount);
+                }
             }
 
             mBytesRemaining = mCurrentWriteLength;
@@ -3542,7 +3584,15 @@
             }
 
             memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
-                    mNormalFrameCount * mChannelCount);
+                    mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+            // The sample data is partially interleaved when haptic channels exist,
+            // so the channels need to be adjusted here.
+            if (mHapticChannelCount > 0) {
+                adjust_channels_non_destructive(mSinkBuffer, mChannelCount, mSinkBuffer,
+                        mChannelCount + mHapticChannelCount,
+                        audio_bytes_per_sample(mFormat),
+                        audio_bytes_per_frame(mChannelCount, mFormat) * mNormalFrameCount);
+            }
         }
 
         // enable changes in effect chain
@@ -3708,6 +3758,7 @@
 // removeTracks_l() must be called with ThreadBase::mLock held
 void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
 {
+    bool enabledHapticTracksRemoved = false;
     for (const auto& track : tracksToRemove) {
         mActiveTracks.remove(track);
         ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
@@ -3729,6 +3780,18 @@
             // remove from our tracks vector
             removeTrack_l(track);
         }
+        enabledHapticTracksRemoved |= track->getHapticPlaybackEnabled();
+    }
+    // If the thread supports haptic playback and the track playing haptic data was removed,
+    // enable haptic playback on the first active track that contains haptic channels.
+    // TODO: Query vibrator service to know which track should enable haptic playback.
+    if (enabledHapticTracksRemoved && mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+        for (auto &t : mActiveTracks) {
+            if (t->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) {
+                t->setHapticPlaybackEnabled(true);
+                break;
+            }
+        }
     }
 }
 
@@ -3934,7 +3997,8 @@
     // create an NBAIO sink for the HAL output stream, and negotiate
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
-    const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
+    const NBAIO_Format offers[1] = {Format_from_SR_C(
+            mSampleRate, mChannelCount + mHapticChannelCount, mFormat)};
 #if !LOG_NDEBUG
     ssize_t index =
 #else
@@ -3976,7 +4040,7 @@
             // change our Sink format to accept our intermediate precision
             mFormat = fastMixerFormat;
             free(mSinkBuffer);
-            mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
+            mFrameSize = audio_bytes_per_frame(mChannelCount + mHapticChannelCount, mFormat);
             const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
             (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
         }
@@ -4018,8 +4082,10 @@
         // wrap the source side of the MonoPipe to make it an AudioBufferProvider
         fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
         fastTrack->mVolumeProvider = NULL;
-        fastTrack->mChannelMask = mChannelMask; // mPipeSink channel mask for audio to FastMixer
+        fastTrack->mChannelMask = mChannelMask | mHapticChannelMask; // mPipeSink channel mask for
+                                                                     // audio to FastMixer
         fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
+        fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
         fastTrack->mGeneration++;
         state->mFastTracksGen++;
         state->mTrackMask = 1;
@@ -4027,6 +4093,10 @@
         state->mOutputSink = mOutputSink.get();
         state->mOutputSinkGen++;
         state->mFrameCount = mFrameCount;
+        // Specify the sink channel mask when a haptic channel mask is present, as it cannot
+        // be calculated directly from the channel count.
+        state->mSinkChannelMask = mHapticChannelMask == AUDIO_CHANNEL_NONE
+                ? AUDIO_CHANNEL_NONE : mChannelMask | mHapticChannelMask;
         state->mCommand = FastMixerState::COLD_IDLE;
         // already done in constructor initialization list
         //mFastMixerFutex = 0;
@@ -4403,6 +4473,7 @@
         std::vector<std::pair<sp<Track>, size_t>> mUnderrunFrames;
     } deferredOperations(&mixerStatus); // implicit nested scope for variable capture
 
+    bool noFastHapticTrack = true;
     for (size_t i=0 ; i<count ; i++) {
         const sp<Track> t = mActiveTracks[i];
 
@@ -4411,6 +4482,9 @@
 
         // process fast tracks
         if (track->isFastTrack()) {
+            if (track->getHapticPlaybackEnabled()) {
+                noFastHapticTrack = false;
+            }
 
             // It's theoretically possible (though unlikely) for a fast track to be created
             // and then removed within the same normal mix cycle.  This is not a problem, as
@@ -4536,6 +4610,7 @@
                     fastTrack->mVolumeProvider = vp;
                     fastTrack->mChannelMask = track->mChannelMask;
                     fastTrack->mFormat = track->mFormat;
+                    fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
                     fastTrack->mGeneration++;
                     state->mTrackMask |= 1 << j;
                     didModify = true;
@@ -4581,6 +4656,10 @@
                 // Avoids a misleading display in dumpsys
                 track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL;
             }
+            if (fastTrack->mHapticPlaybackEnabled != track->getHapticPlaybackEnabled()) {
+                fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
+                didModify = true;
+            }
             continue;
         }
 
@@ -4788,7 +4867,8 @@
             mAudioMixer->setParameter(
                 trackId,
                 AudioMixer::TRACK,
-                AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask);
+                AudioMixer::MIXER_CHANNEL_MASK,
+                (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
             uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
@@ -4849,6 +4929,10 @@
                 trackId,
                 AudioMixer::TRACK,
                 AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
+            mAudioMixer->setParameter(
+                trackId,
+                AudioMixer::TRACK,
+                AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
 
             // reset retry count
             track->mRetryCount = kMaxTrackRetries;
@@ -4916,6 +5000,17 @@
 
     }
 
+    if (mHapticChannelMask != AUDIO_CHANNEL_NONE && sq != NULL) {
+        // When no fast track is playing haptic data and the FastMixer exists,
+        // enable the first FastTrack, which provides mixed data from normal
+        // tracks, to play the haptic data.
+        FastTrack *fastTrack = &state->mFastTracks[0];
+        if (fastTrack->mHapticPlaybackEnabled != noFastHapticTrack) {
+            fastTrack->mHapticPlaybackEnabled = noFastHapticTrack;
+            didModify = true;
+        }
+    }
+
     // Push the new FastMixer state if necessary
     bool pauseAudioWatchdog = false;
     if (didModify) {
@@ -7359,8 +7454,7 @@
         status_t status = NO_ERROR;
         if (recordTrack->isExternalTrack()) {
             mLock.unlock();
-            bool silenced;
-            status = AudioSystem::startInput(recordTrack->portId(), &silenced);
+            status = AudioSystem::startInput(recordTrack->portId());
             mLock.lock();
             if (recordTrack->isInvalid()) {
                 recordTrack->clearSyncStartEvent();
@@ -7388,7 +7482,6 @@
                 recordTrack->clearSyncStartEvent();
                 return status;
             }
-            recordTrack->setSilenced(silenced);
         }
         // Catch up with current buffer indices if thread is already running.
         // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
@@ -7554,7 +7647,7 @@
         (void)input->stream->dump(fd);
     }
 
-    const double latencyMs = audio_is_linear_pcm(mFormat)
+    const double latencyMs = RecordTrack::checkServerLatencySupported(mFormat, flags)
             ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
     if (latencyMs != 0.) {
         dprintf(fd, "  NormalRecord latency ms: %.2lf\n", latencyMs);
@@ -8338,11 +8431,10 @@
         return BAD_VALUE;
     }
 
-    bool silenced = false;
     if (isOutput()) {
         ret = AudioSystem::startOutput(portId);
     } else {
-        ret = AudioSystem::startInput(portId, &silenced);
+        ret = AudioSystem::startInput(portId);
     }
 
     Mutex::Autolock _l(mLock);
@@ -8363,21 +8455,21 @@
         return PERMISSION_DENIED;
     }
 
-    if (isOutput()) {
-        // force volume update when a new track is added
-        mHalVolFloat = -1.0f;
-    } else if (!silenced) {
-        for (const sp<MmapTrack> &track : mActiveTracks) {
-            if (track->isSilenced_l() && track->uid() != client.clientUid)
-                track->invalidate();
-        }
-    }
-
     // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
     sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
                                         isOutput(), client.clientUid, client.clientPid, portId);
 
-    track->setSilenced_l(silenced);
+    if (isOutput()) {
+        // force volume update when a new track is added
+        mHalVolFloat = -1.0f;
+    } else if (!track->isSilenced_l()) {
+        for (const sp<MmapTrack> &t : mActiveTracks) {
+            if (t->isSilenced_l() && t->uid() != client.clientUid)
+                t->invalidate();
+        }
+    }
+
+
     mActiveTracks.add(track);
     sp<EffectChain> chain = getEffectChain_l(mSessionId);
     if (chain != 0) {
@@ -9138,7 +9230,13 @@
 
 status_t AudioFlinger::MmapCaptureThread::exitStandby()
 {
-    mInput->stream->setGain(1.0f);
+    {
+        // mInput might have been cleared by clearInput()
+        Mutex::Autolock _l(mLock);
+        if (mInput != nullptr && mInput->stream != nullptr) {
+            mInput->stream->setGain(1.0f);
+        }
+    }
     return MmapThread::exitStandby();
 }
 
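
On the readOutputParameters_l() change in this file: the negotiated channel mask is split into an audio part and a haptic part so that buffer and frame-size math can account for both, while the mixer-facing mChannelMask/mChannelCount describe only the audio channels. Below is a compact sketch of that split with an assumed constant in place of AUDIO_CHANNEL_HAPTIC_ALL; it only illustrates the arithmetic, not the thread code itself.

    #include <cstdint>

    constexpr uint32_t kHapticAllBits = 0x3u << 28;  // assumed AUDIO_CHANNEL_HAPTIC_ALL bits

    struct SinkChannelSplit {
        uint32_t audioMask;
        uint32_t hapticMask;
        uint32_t audioCount;
        uint32_t hapticCount;
    };

    // Peel the haptic bits off the combined mask, then count both parts independently.
    SinkChannelSplit splitSinkMask(uint32_t combinedMask) {
        SinkChannelSplit s;
        s.hapticMask  = combinedMask & kHapticAllBits;
        s.audioMask   = combinedMask & ~kHapticAllBits;
        s.hapticCount = static_cast<uint32_t>(__builtin_popcount(s.hapticMask));
        s.audioCount  = static_cast<uint32_t>(__builtin_popcount(s.audioMask));
        return s;
    }
    // Frame size then covers both parts, mirroring the diff:
    //   bytesPerFrame = (audioCount + hapticCount) * bytesPerSample
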
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 49fc234..e8b2158 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -907,6 +907,11 @@
     int64_t                         mBytesWritten;
     int64_t                         mFramesWritten; // not reset on standby
     int64_t                         mSuspendedFrames; // not reset on standby
+
+    // mHapticChannelMask and mHapticChannelCount will only be valid when the thread supports
+    // haptic playback.
+    audio_channel_mask_t            mHapticChannelMask = AUDIO_CHANNEL_NONE;
+    uint32_t                        mHapticChannelCount = 0;
 private:
     // mMasterMute is in both PlaybackThread and in AudioFlinger.  When a
     // PlaybackThread needs to find out if master-muted, it checks it's local
@@ -1199,6 +1204,8 @@
                        audio_io_handle_t id, audio_devices_t device, bool systemReady);
     virtual                 ~DirectOutputThread();
 
+                status_t    selectPresentation(int presentationId, int programId);
+
     // Thread virtuals
 
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f2617ae..9a7f1f1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -326,6 +326,10 @@
     return mTrack->setParameters(keyValuePairs);
 }
 
+status_t AudioFlinger::TrackHandle::selectPresentation(int presentationId, int programId) {
+    return mTrack->selectPresentation(presentationId, programId);
+}
+
 VolumeShaper::Status AudioFlinger::TrackHandle::applyVolumeShaper(
         const sp<VolumeShaper::Configuration>& configuration,
         const sp<VolumeShaper::Operation>& operation) {
@@ -498,7 +502,7 @@
 
 void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
-    result.appendFormat("Type     Id Active Client Session S  Flags "
+    result.appendFormat("Type     Id Active Client Session Port Id S  Flags "
                         "  Format Chn mask  SRate "
                         "ST Usg CT "
                         " G db  L dB  R dB  VS dB "
@@ -584,7 +588,7 @@
             ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
                     ? 'e' /* error */ : ' ' /* identical */;
 
-    result.appendFormat("%7s %6u %7u %2s 0x%03X "
+    result.appendFormat("%7s %6u %7u %7u %2s 0x%03X "
                         "%08X %08X %6u "
                         "%2u %3x %2x "
                         "%5.2g %5.2g %5.2g %5.2g%c "
@@ -592,6 +596,7 @@
             active ? "yes" : "no",
             (mClient == 0) ? getpid() : mClient->pid(),
             mSessionId,
+            mPortId,
             getTrackStateString(),
             mCblk->mFlags,
 
@@ -976,6 +981,19 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::selectPresentation(int presentationId,
+        int programId) {
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread == 0) {
+        ALOGE("thread is dead");
+        return FAILED_TRANSACTION;
+    } else if ((thread->type() == ThreadBase::DIRECT) || (thread->type() == ThreadBase::OFFLOAD)) {
+        DirectOutputThread *directOutputThread = static_cast<DirectOutputThread*>(thread.get());
+        return directOutputThread->selectPresentation(presentationId, programId);
+    }
+    return INVALID_OPERATION;
+}
+
 VolumeShaper::Status AudioFlinger::PlaybackThread::Track::applyVolumeShaper(
         const sp<VolumeShaper::Configuration>& configuration,
         const sp<VolumeShaper::Operation>& operation)
@@ -1755,7 +1773,7 @@
         thread->mFastTrackAvail = false;
     } else {
         // TODO: only Normal Record has timestamps (Fast Record does not).
-        mServerLatencySupported = audio_is_linear_pcm(mFormat);
+        mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
     }
 #ifdef TEE_SINK
     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
@@ -1869,7 +1887,7 @@
 
 void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.appendFormat("Active     Id Client Session S  Flags  "
+    result.appendFormat("Active     Id Client Session Port Id  S  Flags  "
                         " Format Chn mask  SRate Source  "
                         " Server FrmCnt FrmRdy Sil%s\n",
                         isServerLatencySupported() ? "   Latency" : "");
@@ -1877,7 +1895,7 @@
 
 void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
 {
-    result.appendFormat("%c%5s %6d %6u %7u %2s 0x%03X "
+    result.appendFormat("%c%5s %6d %6u %7u %7u  %2s 0x%03X "
             "%08X %08X %6u %6X "
             "%08X %6zu %6zu %3c",
             isFastTrack() ? 'F' : ' ',
@@ -1885,6 +1903,7 @@
             mId,
             (mClient == 0) ? getpid() : mClient->pid(),
             mSessionId,
+            mPortId,
             getTrackStateString(),
             mCblk->mFlags,
 
@@ -2125,15 +2144,16 @@
 
 void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
 {
-    result.appendFormat("Client Session   Format Chn mask  SRate Flags %s\n",
+    result.appendFormat("Client Session Port Id  Format Chn mask  SRate Flags %s\n",
                         isOut() ? "Usg CT": "Source");
 }
 
 void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
 {
-    result.appendFormat("%6u %7u %08X %08X %6u 0x%03X ",
+    result.appendFormat("%6u %7u %7u %08X %08X %6u 0x%03X ",
             mPid,
             mSessionId,
+            mPortId,
             mFormat,
             mChannelMask,
             mSampleRate,
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index f23e426..02ab8ad 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -81,9 +81,9 @@
 LOCAL_SHARED_LIBRARIES += libmedia_helper
 LOCAL_SHARED_LIBRARIES += libmediametrics
 
-ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 LOCAL_SHARED_LIBRARIES += libhidlbase libicuuc libxml2
 
+ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
 endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 3c3a82b..ea6389c 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -65,20 +65,6 @@
         API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
     } input_type_t;
 
-    enum {
-        API_INPUT_CONCURRENCY_NONE = 0,
-        API_INPUT_CONCURRENCY_CALL = (1 << 0),      // Concurrency with a call
-        API_INPUT_CONCURRENCY_CAPTURE = (1 << 1),   // Concurrency with another capture
-        API_INPUT_CONCURRENCY_HOTWORD = (1 << 2),   // Concurrency with a hotword
-        API_INPUT_CONCURRENCY_PREEMPT = (1 << 3),   // pre-empted someone
-                // NB: preempt is marked on a successful return, others are on failing calls
-        API_INPUT_CONCURRENCY_LAST = (1 << 4),
-
-        API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_LAST - 1),
-    };
-
-    typedef uint32_t concurrency_type__mask_t;
-
 public:
     virtual ~AudioPolicyInterface() {}
     //
@@ -141,9 +127,7 @@
                                      input_type_t *inputType,
                                      audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the input starts being used.
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool silenced,
-                                concurrency_type__mask_t *concurrency) = 0;
+    virtual status_t startInput(audio_port_handle_t portId) = 0;
     // indicates to the audio policy manager that the input stops being used.
     virtual status_t stopInput(audio_port_handle_t portId) = 0;
     // releases the input.
@@ -197,6 +181,8 @@
     virtual status_t    dump(int fd) = 0;
 
     virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo) = 0;
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) = 0;
 
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
@@ -242,6 +228,8 @@
                                         bool reported) = 0;
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
 
+    virtual bool     isHapticPlaybackSupported() = 0;
+
     virtual void     setAppState(uid_t uid, app_state_t state);
 };
 
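
The new isHapticPlaybackSupported() entry point lets callers find out whether any output path exposes haptic channels before attaching haptic data to a track. As a hedged illustration only (not the actual AudioPolicyManager code), an implementation could scan the advertised output channel masks for haptic bits:

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kHapticAllBits = 0x3u << 28;  // assumed AUDIO_CHANNEL_HAPTIC_ALL bits

    // Hypothetical view of an output profile: just the channel masks it advertises.
    struct OutputProfileSketch {
        std::vector<uint32_t> channelMasks;
    };

    // Haptic playback is supported if any output profile advertises a mask with haptic bits set.
    bool hapticPlaybackSupportedSketch(const std::vector<OutputProfileSketch>& profiles) {
        for (const auto& profile : profiles) {
            for (uint32_t mask : profile.channelMasks) {
                if ((mask & kHapticAllBits) != 0) {
                    return true;
                }
            }
        }
        return false;
    }
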
diff --git a/services/audiopolicy/common/Android.bp b/services/audiopolicy/common/Android.bp
new file mode 100644
index 0000000..a925b9a
--- /dev/null
+++ b/services/audiopolicy/common/Android.bp
@@ -0,0 +1,4 @@
+cc_library_headers {
+    name: "libaudiopolicycommon",
+    export_include_dirs: ["include"],
+}
diff --git a/services/audiopolicy/common/Android.mk b/services/audiopolicy/common/Android.mk
deleted file mode 100644
index dcce8e3..0000000
--- a/services/audiopolicy/common/Android.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-
-LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
-
-#######################################################################
-# Recursive call sub-folder Android.mk
-#
-include $(call all-makefiles-under,$(LOCAL_PATH))
-
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 9bd68e1..30b0044 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -32,14 +32,6 @@
 #define MAX_MIXER_CHANNEL_COUNT FCC_8
 
 /**
- * A device mask for all audio input devices that are considered "virtual" when evaluating
- * active inputs in getActiveInputs()
- */
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|\
-        AUDIO_DEVICE_IN_BUS|AUDIO_DEVICE_IN_FM_TUNER)
-
-
-/**
  * A device mask for all audio input and output devices where matching inputs/outputs on device
  * type alone is not enough: the address must match too
  */
@@ -68,23 +60,6 @@
 }
 
 /**
- * Check if the input device given is considered as a virtual device.
- *
- * @param[in] device to consider
- *
- * @return true if the device is a virtual one, false otherwise.
- */
-static inline bool is_virtual_input_device(audio_devices_t device)
-{
-    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
-        device &= ~AUDIO_DEVICE_BIT_IN;
-        if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
-            return true;
-    }
-    return false;
-}
-
-/**
  * Check whether the device type is one
  * where addresses are used to distinguish between one connected device and another
  *
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
new file mode 100644
index 0000000..d0b4973
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -0,0 +1,53 @@
+cc_library_static {
+    name: "libaudiopolicycomponents",
+
+    srcs: [
+        "src/AudioCollections.cpp",
+        "src/AudioGain.cpp",
+        "src/AudioInputDescriptor.cpp",
+        "src/AudioOutputDescriptor.cpp",
+        "src/AudioPatch.cpp",
+        "src/AudioPolicyMix.cpp",
+        "src/AudioPort.cpp",
+        "src/AudioProfile.cpp",
+        "src/AudioRoute.cpp",
+        "src/ClientDescriptor.cpp",
+        "src/DeviceDescriptor.cpp",
+        "src/EffectDescriptor.cpp",
+        "src/HwModule.cpp",
+        "src/IOProfile.cpp",
+        "src/Serializer.cpp",
+        "src/SoundTriggerSession.cpp",
+        "src/TypeConverter.cpp",
+        "src/VolumeCurve.cpp",
+    ],
+    shared_libs: [
+        "libcutils",
+        "libhidlbase",
+        "libicuuc",
+        "liblog",
+        "libmedia",
+        "libutils",
+        "libxml2",
+    ],
+    export_shared_lib_headers: ["libmedia"],
+    static_libs: [
+        "libaudioutils",
+    ],
+    header_libs: [
+        "libaudiopolicycommon",
+    ],
+    export_header_lib_headers: ["libaudiopolicycommon"],
+
+    include_dirs: [
+        "frameworks/av/services/audiopolicy",
+    ],
+
+    export_include_dirs: ["include"],
+
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+
+}
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
deleted file mode 100644
index 3336b79..0000000
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ /dev/null
@@ -1,66 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
-    src/DeviceDescriptor.cpp \
-    src/AudioGain.cpp \
-    src/HwModule.cpp \
-    src/IOProfile.cpp \
-    src/AudioPort.cpp \
-    src/AudioProfile.cpp \
-    src/AudioRoute.cpp \
-    src/AudioPolicyMix.cpp \
-    src/AudioPatch.cpp \
-    src/AudioInputDescriptor.cpp \
-    src/AudioOutputDescriptor.cpp \
-    src/AudioCollections.cpp \
-    src/EffectDescriptor.cpp \
-    src/SoundTriggerSession.cpp \
-    src/VolumeCurve.cpp \
-    src/TypeConverter.cpp \
-    src/ClientDescriptor.cpp
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils \
-    libmedia \
-    libutils \
-    liblog \
-
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
-
-LOCAL_C_INCLUDES := \
-    $(LOCAL_PATH)/include \
-    frameworks/av/services/audiopolicy/common/include \
-    frameworks/av/services/audiopolicy \
-    $(call include-path-for, audio-utils) \
-
-ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-
-LOCAL_SRC_FILES += src/Serializer.cpp
-
-LOCAL_SHARED_LIBRARIES += libhidlbase libicuuc libxml2
-
-LOCAL_C_INCLUDES += \
-    external/libxml2/include \
-    external/icu/icu4c/source/common
-
-else
-
-LOCAL_SRC_FILES += \
-    src/ConfigParsingUtils.cpp \
-    src/StreamDescriptor.cpp \
-    src/Gains.cpp
-
-endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := \
-    $(LOCAL_PATH)/include
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_CFLAGS := -Wall -Werror
-
-LOCAL_MODULE := libaudiopolicycomponents
-
-include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 6e4c044..9f8b8c0 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -58,9 +58,8 @@
     void clearPreemptedSessions();
     bool isActive() const { return mGlobalActiveCount > 0; }
     bool isSourceActive(audio_source_t source) const;
-    audio_source_t inputSource(bool activeOnly = false) const;
+    audio_source_t source() const;
     bool isSoundTrigger() const;
-    audio_source_t getHighestPrioritySource(bool activeOnly) const;
     void setClientActive(const sp<RecordClientDescriptor>& client, bool active);
     int32_t activeCount() { return mGlobalActiveCount; }
 
@@ -121,7 +120,7 @@
      * Only considers inputs from physical devices (e.g. main mic, headset mic) when
      * ignoreVirtualInputs is true.
      */
-    Vector<sp <AudioInputDescriptor> > getActiveInputs(bool ignoreVirtualInputs = true);
+    Vector<sp <AudioInputDescriptor> > getActiveInputs();
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index d1a0f9b..5099ebb 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -176,7 +176,8 @@
                     AUDIO_FORMAT_AAC_HE_V1, AUDIO_FORMAT_AAC_HE_V2, AUDIO_FORMAT_AAC_ELD,
                     AUDIO_FORMAT_AAC_XHE}},
             {AUDIO_FORMAT_DOLBY_TRUEHD, {}},
-            {AUDIO_FORMAT_E_AC3_JOC, {}}};
+            {AUDIO_FORMAT_E_AC3_JOC, {}},
+            {AUDIO_FORMAT_AC4, {}}};
     }
 
 private:
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 030bf4b..986d109 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -22,6 +22,7 @@
 #include <sys/types.h>
 
 #include <system/audio.h>
+#include <system/audio_policy.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -106,7 +107,7 @@
                         audio_port_handle_t preferredDeviceId,
                         audio_source_t source, audio_input_flags_t flags, bool isSoundTrigger) :
         ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
-        mSource(source), mFlags(flags), mIsSoundTrigger(isSoundTrigger), mSilenced(false) {}
+        mSource(source), mFlags(flags), mIsSoundTrigger(isSoundTrigger), mAppState(APP_STATE_IDLE) {}
     ~RecordClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
@@ -115,14 +116,16 @@
     audio_source_t source() const { return mSource; }
     audio_input_flags_t flags() const { return mFlags; }
     bool isSoundTrigger() const { return mIsSoundTrigger; }
-    void setSilenced(bool silenced) { mSilenced = silenced; }
-    bool isSilenced() const { return mSilenced; }
+    void setAppState(app_state_t appState) { mAppState = appState; }
+    app_state_t appState() { return mAppState; }
+    bool isSilenced() const { return mAppState == APP_STATE_IDLE; }
 
 private:
     const audio_source_t mSource;
     const audio_input_flags_t mFlags;
     const bool mIsSoundTrigger;
-          bool mSilenced;
+          app_state_t mAppState;
+
 };
 
 class SourceClientDescriptor: public TrackClientDescriptor
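
With this change the record client no longer stores a raw silenced flag; it stores the owning app's state, and "silenced" is derived from it. A tiny sketch of that mapping follows; the enum values are assumptions standing in for app_state_t from system/audio_policy.h.

    // Assumed ordering, mirroring app_state_t: idle < foreground service < foreground UI.
    enum class AppStateSketch { Idle = 0, ForegroundService = 1, Foreground = 2 };

    class RecordClientSketch {
    public:
        void setAppState(AppStateSketch s) { mAppState = s; }
        AppStateSketch appState() const { return mAppState; }
        // A client is silenced exactly when its app is idle, as in the header above.
        bool isSilenced() const { return mAppState == AppStateSketch::Idle; }
    private:
        AppStateSketch mAppState = AppStateSketch::Idle;  // new clients start out silenced
    };
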
diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
deleted file mode 100644
index a007854..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "AudioPolicyConfig.h"
-#include "DeviceDescriptor.h"
-#include "HwModule.h"
-#include "audio_policy_conf.h"
-#include <system/audio.h>
-#include <utils/Log.h>
-#include <utils/Vector.h>
-#include <utils/SortedVector.h>
-#include <cutils/config_utils.h>
-#include <utils/RefBase.h>
-#include <system/audio_policy.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-// Definitions for audio_policy.conf file parsing
-// ----------------------------------------------------------------------------
-
-class ConfigParsingUtils
-{
-public:
-    static status_t loadConfig(const char *path, AudioPolicyConfig &config);
-
-private:
-    static void loadAudioPortGain(cnode *root, AudioPort &audioPort, int index);
-    static void loadAudioPortGains(cnode *root, AudioPort &audioPort);
-    static void loadDeviceDescriptorGains(cnode *root, sp<DeviceDescriptor> &deviceDesc);
-    static status_t loadHwModuleDevice(cnode *root, DeviceVector &devices);
-    static status_t loadHwModuleProfile(cnode *root, sp<HwModule> &module, audio_port_role_t role);
-    static void loadDevicesFromTag(const char *tag, DeviceVector &devices,
-                            const DeviceVector &declaredDevices);
-    static void loadHwModules(cnode *root, HwModuleCollection &hwModules,
-                              AudioPolicyConfig &config);
-    static void loadGlobalConfig(cnode *root, AudioPolicyConfig &config,
-                                 const sp<HwModule> &primaryModule);
-    static void loadModuleGlobalConfig(cnode *root, const sp<HwModule> &module,
-                                       AudioPolicyConfig &config);
-    static status_t loadHwModule(cnode *root, sp<HwModule> &module, AudioPolicyConfig &config);
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 83fb10c..6f99bf3 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -31,6 +31,8 @@
 public:
      // Note that empty name refers by convention to a generic device.
     explicit DeviceDescriptor(audio_devices_t type, const String8 &tagName = String8(""));
+    DeviceDescriptor(audio_devices_t type, const FormatVector &encodedFormats,
+            const String8 &tagName = String8(""));
 
     virtual ~DeviceDescriptor() {}
 
@@ -38,6 +40,8 @@
 
     audio_devices_t type() const { return mDeviceType; }
 
+    const FormatVector& encodedFormats() const { return mEncodedFormats; }
+
     bool equals(const sp<DeviceDescriptor>& other) const;
 
     // AudioPortConfig
@@ -59,6 +63,7 @@
 private:
     String8 mTagName; // Unique human readable identifier for a device port found in conf file.
     audio_devices_t     mDeviceType;
+    FormatVector        mEncodedFormats;
     audio_port_handle_t mId;
 
 friend class DeviceVector;
diff --git a/services/audiopolicy/common/managerdefinitions/include/Gains.h b/services/audiopolicy/common/managerdefinitions/include/Gains.h
deleted file mode 100644
index cb229a4..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/Gains.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <StreamDescriptor.h>
-#include <utils/KeyedVector.h>
-#include <system/audio.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-class StreamDescriptor;
-
-class Gains
-{
-public :
-    static float volIndexToDb(const VolumeCurvePoint *point, int indexMin, int indexMax,
-                              int indexInUi);
-
-    // default volume curve
-    static const VolumeCurvePoint sDefaultVolumeCurve[Volume::VOLCNT];
-    // default volume curve for media strategy
-    static const VolumeCurvePoint sDefaultMediaVolumeCurve[Volume::VOLCNT];
-    // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
-    static const VolumeCurvePoint sExtMediaSystemVolumeCurve[Volume::VOLCNT];
-    // volume curve for media strategy on speakers
-    static const VolumeCurvePoint sSpeakerMediaVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT];
-    // volume curve for sonification strategy on speakers
-    static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT];
-    static const VolumeCurvePoint sDefaultSystemVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[Volume::VOLCNT];
-    static const VolumeCurvePoint sHeadsetSystemVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sDefaultVoiceVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sLinearVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
-    static const VolumeCurvePoint sHearingAidVolumeCurve[Volume::VOLCNT];
-    // default volume curves per stream and device category. See initializeVolumeCurves()
-    static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index eb32959..8ff8238 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -35,7 +35,7 @@
 public:
     IOProfile(const String8 &name, audio_port_role_t role)
         : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
-          maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
+          maxOpenCount(1),
           curOpenCount(0),
           maxActiveCount(1),
           curActiveCount(0) {}
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
deleted file mode 100644
index 6266313..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "IVolumeCurvesCollection.h"
-#include <utils/KeyedVector.h>
-#include <utils/StrongPointer.h>
-#include <utils/SortedVector.h>
-#include <system/audio.h>
-
-namespace android {
-
-// stream descriptor used for volume control
-class StreamDescriptor
-{
-public:
-    StreamDescriptor();
-
-    int getVolumeIndex(audio_devices_t device) const;
-    bool canBeMuted() const { return mCanBeMuted; }
-    void clearCurrentVolumeIndex();
-    void addCurrentVolumeIndex(audio_devices_t device, int index);
-    int getVolumeIndexMin() const { return mIndexMin; }
-    int getVolumeIndexMax() const { return mIndexMax; }
-    void setVolumeIndexMin(int volIndexMin);
-    void setVolumeIndexMax(int volIndexMax);
-    bool hasVolumeIndexForDevice(audio_devices_t device) const
-    {
-        device = Volume::getDeviceForVolume(device);
-        return mIndexCur.indexOfKey(device) >= 0;
-    }
-
-    void dump(String8 *dst) const;
-
-    void setVolumeCurvePoint(device_category deviceCategory, const VolumeCurvePoint *point);
-    const VolumeCurvePoint *getVolumeCurvePoint(device_category deviceCategory) const
-    {
-        return mVolumeCurve[deviceCategory];
-    }
-
-private:
-    const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT];
-    KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
-    int mIndexMin; /**< min volume index. */
-    int mIndexMax; /**< max volume index. */
-    bool mCanBeMuted; /**< true is the stream can be muted. */
-};
-
-/**
- * stream descriptors collection for volume control
- */
-class StreamDescriptorCollection : public DefaultKeyedVector<audio_stream_type_t, StreamDescriptor>,
-                                   public IVolumeCurvesCollection
-{
-public:
-    StreamDescriptorCollection();
-
-    virtual void clearCurrentVolumeIndex(audio_stream_type_t stream);
-    virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
-                                       int index);
-    virtual bool canBeMuted(audio_stream_type_t stream);
-    virtual int getVolumeIndexMin(audio_stream_type_t stream) const
-    {
-        return valueFor(stream).getVolumeIndexMin();
-    }
-    virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
-    {
-        return valueFor(stream).getVolumeIndex(device);
-    }
-    virtual int getVolumeIndexMax(audio_stream_type_t stream) const
-    {
-        return valueFor(stream).getVolumeIndexMax();
-    }
-    virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
-                               int indexInUi) const;
-    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
-    virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled);
-    virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst);
-    virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
-                                         audio_devices_t device) const
-    {
-        return valueFor(stream).hasVolumeIndexForDevice(device);
-    }
-
-    void dump(String8 *dst) const override;
-
-private:
-    void setVolumeCurvePoint(audio_stream_type_t stream, device_category deviceCategory,
-                             const VolumeCurvePoint *point);
-    const VolumeCurvePoint *getVolumeCurvePoint(audio_stream_type_t stream,
-                                                device_category deviceCategory) const;
-    void setVolumeIndexMin(audio_stream_type_t stream,int volIndexMin);
-    void setVolumeIndexMax(audio_stream_type_t stream,int volIndexMax);
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 1f29874..559274f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -53,9 +53,32 @@
     return mId;
 }
 
-audio_source_t AudioInputDescriptor::inputSource(bool activeOnly) const
+audio_source_t AudioInputDescriptor::source() const
 {
-    return getHighestPrioritySource(activeOnly);
+    audio_source_t source = AUDIO_SOURCE_DEFAULT;
+
+    for (bool activeOnly : { true, false }) {
+        int32_t topPriority = -1;
+        app_state_t topState = APP_STATE_IDLE;
+        for (const auto &client : getClientIterable()) {
+            if (activeOnly && !client->active()) {
+                continue;
+            }
+            app_state_t curState = client->appState();
+            if (curState >= topState) {
+                int32_t curPriority = source_priority(client->source());
+                if (curPriority > topPriority) {
+                    source = client->source();
+                    topPriority = curPriority;
+                }
+                topState = curState;
+            }
+        }
+        if (source != AUDIO_SOURCE_DEFAULT) {
+            break;
+        }
+    }
+    return source;
 }
 
 void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -76,7 +99,7 @@
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
     dstConfig->ext.mix.hw_module = getModuleHandle();
     dstConfig->ext.mix.handle = mIoHandle;
-    dstConfig->ext.mix.usecase.source = inputSource();
+    dstConfig->ext.mix.usecase.source = source();
 }
 
 void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
@@ -125,24 +148,6 @@
     return false;
 }
 
-audio_source_t AudioInputDescriptor::getHighestPrioritySource(bool activeOnly) const
-{
-    audio_source_t source = AUDIO_SOURCE_DEFAULT;
-    int32_t priority = -1;
-
-    for (const auto &client : getClientIterable()) {
-        if (activeOnly && !client->active() ) {
-            continue;
-        }
-        int32_t curPriority = source_priority(client->source());
-        if (curPriority > priority) {
-            priority = curPriority;
-            source = client->source();
-        }
-    }
-    return source;
-}
-
 bool AudioInputDescriptor::isSoundTrigger() const {
     // sound trigger and non sound trigger clients are not mixed on a given input
     // so check only first client
@@ -224,7 +229,7 @@
 
 status_t AudioInputDescriptor::start()
 {
-    if (mGlobalActiveCount == 1) {
+    if (!isActive()) {
         if (!mProfile->canStartNewIo()) {
             ALOGI("%s mProfile->curActiveCount %d", __func__, mProfile->curActiveCount);
             return INVALID_OPERATION;
@@ -388,15 +393,13 @@
     return count;
 }
 
-Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs(bool ignoreVirtualInputs)
+Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs()
 {
     Vector<sp <AudioInputDescriptor> > activeInputs;
 
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
-        if ((inputDescriptor->isActive())
-                && (!ignoreVirtualInputs ||
-                    !is_virtual_input_device(inputDescriptor->mDevice))) {
+        if (inputDescriptor->isActive()) {
             activeInputs.add(inputDescriptor);
         }
     }
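
The selection policy implemented by the new source() above can be restated as a compact standalone sketch. Client and its fields below are simplified placeholders for the framework's record client type, app-state values and source_priority() helper, not the real AOSP classes.

#include <cstdint>
#include <vector>

// Simplified stand-in for a record client attached to an input.
struct Client {
    bool    active;
    int32_t appState;        // 0 stands in for APP_STATE_IDLE; higher is more foreground
    int32_t sourcePriority;  // stands in for source_priority(client->source())
    int     source;          // stands in for audio_source_t
};

// Mirror of the two-pass selection in source(): prefer active clients, and within
// a pass pick the highest-priority source among clients with the top app state.
static int pickSource(const std::vector<Client>& clients, int defaultSource) {
    int picked = defaultSource;
    for (bool activeOnly : { true, false }) {
        int32_t topPriority = -1;
        int32_t topState = 0;                 // APP_STATE_IDLE equivalent
        for (const auto& c : clients) {
            if (activeOnly && !c.active) {
                continue;
            }
            if (c.appState >= topState) {
                if (c.sourcePriority > topPriority) {
                    picked = c.source;
                    topPriority = c.sourcePriority;
                }
                topState = c.appState;
            }
        }
        if (picked != defaultSource) {
            break;                            // an active client already decided the source
        }
    }
    return picked;
}
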
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
deleted file mode 100644
index 59db81c..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::ConfigParsingUtils"
-//#define LOG_NDEBUG 0
-
-#include "ConfigParsingUtils.h"
-#include "AudioGain.h"
-#include "IOProfile.h"
-#include <system/audio.h>
-#include <media/AudioParameter.h>
-#include <media/TypeConverter.h>
-#include <utils/Log.h>
-#include <cutils/misc.h>
-
-namespace android {
-
-// --- audio_policy.conf file parsing
-
-//static
-void ConfigParsingUtils::loadAudioPortGain(cnode *root, AudioPort &audioPort, int index)
-{
-    cnode *node = root->first_child;
-
-    sp<AudioGain> gain = new AudioGain(index, audioPort.useInputChannelMask());
-
-    while (node) {
-        if (strcmp(node->name, GAIN_MODE) == 0) {
-            gain->setMode(GainModeConverter::maskFromString(node->value));
-        } else if (strcmp(node->name, GAIN_CHANNELS) == 0) {
-            audio_channel_mask_t mask;
-            if (audioPort.useInputChannelMask()) {
-                if (InputChannelConverter::fromString(node->value, mask)) {
-                    gain->setChannelMask(mask);
-                }
-            } else {
-                if (OutputChannelConverter::fromString(node->value, mask)) {
-                    gain->setChannelMask(mask);
-                }
-            }
-        } else if (strcmp(node->name, GAIN_MIN_VALUE) == 0) {
-            gain->setMinValueInMb(atoi(node->value));
-        } else if (strcmp(node->name, GAIN_MAX_VALUE) == 0) {
-            gain->setMaxValueInMb(atoi(node->value));
-        } else if (strcmp(node->name, GAIN_DEFAULT_VALUE) == 0) {
-            gain->setDefaultValueInMb(atoi(node->value));
-        } else if (strcmp(node->name, GAIN_STEP_VALUE) == 0) {
-            gain->setStepValueInMb(atoi(node->value));
-        } else if (strcmp(node->name, GAIN_MIN_RAMP_MS) == 0) {
-            gain->setMinRampInMs(atoi(node->value));
-        } else if (strcmp(node->name, GAIN_MAX_RAMP_MS) == 0) {
-            gain->setMaxRampInMs(atoi(node->value));
-        }
-        node = node->next;
-    }
-
-    ALOGV("loadGain() adding new gain mode %08x channel mask %08x min mB %d max mB %d",
-          gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
-          gain->getMaxValueInMb());
-
-    if (gain->getMode() == 0) {
-        return;
-    }
-    audioPort.mGains.add(gain);
-}
-
-void ConfigParsingUtils::loadAudioPortGains(cnode *root, AudioPort &audioPort)
-{
-    cnode *node = root->first_child;
-    int index = 0;
-    while (node) {
-        ALOGV("loadGains() loading gain %s", node->name);
-        loadAudioPortGain(node, audioPort, index++);
-        node = node->next;
-    }
-}
-
-//static
-void ConfigParsingUtils::loadDeviceDescriptorGains(cnode *root, sp<DeviceDescriptor> &deviceDesc)
-{
-    loadAudioPortGains(root, *deviceDesc);
-    if (deviceDesc->mGains.size() > 0) {
-        deviceDesc->mGains[0]->getDefaultConfig(&deviceDesc->mGain);
-    }
-}
-
-//static
-status_t ConfigParsingUtils::loadHwModuleDevice(cnode *root, DeviceVector &devices)
-{
-    cnode *node = root->first_child;
-
-    audio_devices_t type = AUDIO_DEVICE_NONE;
-    while (node) {
-        if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
-            deviceFromString(node->value, type);
-            break;
-        }
-        node = node->next;
-    }
-    if (type == AUDIO_DEVICE_NONE ||
-            (!audio_is_input_device(type) && !audio_is_output_device(type))) {
-        ALOGW("loadDevice() bad type %08x", type);
-        return BAD_VALUE;
-    }
-    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(type, String8(root->name));
-
-    node = root->first_child;
-    while (node) {
-        if (strcmp(node->name, APM_DEVICE_ADDRESS) == 0) {
-            deviceDesc->mAddress = String8((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            if (audio_is_input_device(type)) {
-                deviceDesc->addAudioProfile(
-                        new AudioProfile(gDynamicFormat,
-                                         inputChannelMasksFromString(node->value),
-                                         SampleRateVector()));
-            } else {
-                deviceDesc->addAudioProfile(
-                        new AudioProfile(gDynamicFormat,
-                                         outputChannelMasksFromString(node->value),
-                                         SampleRateVector()));
-            }
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            loadDeviceDescriptorGains(node, deviceDesc);
-        }
-        node = node->next;
-    }
-
-    ALOGV("loadDevice() adding device tag (literal type) %s type %08x address %s",
-          deviceDesc->getTagName().string(), type, deviceDesc->mAddress.string());
-
-    devices.add(deviceDesc);
-    return NO_ERROR;
-}
-
-//static
-status_t ConfigParsingUtils::loadHwModuleProfile(cnode *root, sp<HwModule> &module,
-                                                 audio_port_role_t role)
-{
-    cnode *node = root->first_child;
-
-    sp<IOProfile> profile = new IOProfile(String8(root->name), role);
-
-    AudioProfileVector audioProfiles;
-    SampleRateVector sampleRates;
-    ChannelsVector channels;
-    FormatVector formats;
-
-    while (node) {
-        if (strcmp(node->name, FORMATS_TAG) == 0 &&
-                strcmp(node->value, DYNAMIC_VALUE_TAG) != 0) {
-            formats = formatsFromString(node->value);
-        } else if (strcmp(node->name, SAMPLING_RATES_TAG) == 0 &&
-                  strcmp(node->value, DYNAMIC_VALUE_TAG) != 0) {
-            collectionFromString<SampleRateTraits>(node->value, sampleRates);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0 &&
-                   strcmp(node->value, DYNAMIC_VALUE_TAG) != 0) {
-            if (role == AUDIO_PORT_ROLE_SINK) {
-                channels = inputChannelMasksFromString(node->value);
-            } else {
-                channels = outputChannelMasksFromString(node->value);
-            }
-        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            DeviceVector devices;
-            loadDevicesFromTag(node->value, devices, module->getDeclaredDevices());
-            profile->setSupportedDevices(devices);
-        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
-            if (role == AUDIO_PORT_ROLE_SINK) {
-                profile->setFlags(InputFlagConverter::maskFromString(node->value));
-            } else {
-                profile->setFlags(OutputFlagConverter::maskFromString(node->value));
-            }
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            loadAudioPortGains(node, *profile);
-        }
-        node = node->next;
-    }
-    if (formats.isEmpty()) {
-        sp<AudioProfile> profileToAdd = new AudioProfile(gDynamicFormat, channels, sampleRates);
-        profileToAdd->setDynamicFormat(true);
-        profileToAdd->setDynamicChannels(channels.isEmpty());
-        profileToAdd->setDynamicRate(sampleRates.isEmpty());
-        audioProfiles.add(profileToAdd);
-    } else {
-        for (size_t i = 0; i < formats.size(); i++) {
-            // For compatibility reason, for each format, creates a profile with the same
-            // collection of rate and channels.
-            sp<AudioProfile> profileToAdd = new AudioProfile(formats[i], channels, sampleRates);
-            profileToAdd->setDynamicFormat(formats[i] == gDynamicFormat);
-            profileToAdd->setDynamicChannels(channels.isEmpty());
-            profileToAdd->setDynamicRate(sampleRates.isEmpty());
-            audioProfiles.add(profileToAdd);
-        }
-    }
-    profile->setAudioProfiles(audioProfiles);
-    ALOGW_IF(!profile->hasSupportedDevices(), "load%s() invalid supported devices",
-             role == AUDIO_PORT_ROLE_SINK ? "Input" : "Output");
-    if (profile->hasSupportedDevices()) {
-        ALOGV("load%s() adding Supported Devices %04x, mFlags %04x",
-              role == AUDIO_PORT_ROLE_SINK ? "Input" : "Output",
-              profile->getSupportedDevicesType(), profile->getFlags());
-        return module->addProfile(profile);
-    }
-    return BAD_VALUE;
-}
-
-//static
-status_t ConfigParsingUtils::loadHwModule(cnode *root, sp<HwModule> &module,
-                                          AudioPolicyConfig &config)
-{
-    status_t status = NAME_NOT_FOUND;
-    cnode *node = config_find(root, DEVICES_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        DeviceVector devices;
-        while (node) {
-            ALOGV("loadHwModule() loading device %s", node->name);
-            status_t tmpStatus = loadHwModuleDevice(node, devices);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-        module->setDeclaredDevices(devices);
-    }
-    node = config_find(root, OUTPUTS_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading output %s", node->name);
-            status_t tmpStatus = loadHwModuleProfile(node, module, AUDIO_PORT_ROLE_SOURCE);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    node = config_find(root, INPUTS_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading input %s", node->name);
-            status_t tmpStatus = loadHwModuleProfile(node, module, AUDIO_PORT_ROLE_SINK);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    loadModuleGlobalConfig(root, module, config);
-    return status;
-}
-
-//static
-void ConfigParsingUtils::loadHwModules(cnode *root, HwModuleCollection &hwModules,
-                                       AudioPolicyConfig &config)
-{
-    cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
-    if (node == NULL) {
-        return;
-    }
-
-    node = node->first_child;
-    while (node) {
-        ALOGV("loadHwModules() loading module %s", node->name);
-        sp<HwModule> module = new HwModule(node->name);
-        if (loadHwModule(node, module, config) == NO_ERROR) {
-            hwModules.add(module);
-        }
-        node = node->next;
-    }
-}
-
-//static
-void ConfigParsingUtils::loadDevicesFromTag(const char *tag, DeviceVector &devices,
-                                            const DeviceVector &declaredDevices)
-{
-    char *tagLiteral = strndup(tag, strlen(tag));
-    char *devTag = strtok(tagLiteral, AudioParameter::valueListSeparator);
-    while (devTag != NULL) {
-        if (strlen(devTag) != 0) {
-            audio_devices_t type;
-            if (deviceFromString(devTag, type)) {
-                uint32_t inBit = type & AUDIO_DEVICE_BIT_IN;
-                type &= ~AUDIO_DEVICE_BIT_IN;
-                while (type) {
-                  audio_devices_t singleType =
-                        inBit | (1 << (31 - __builtin_clz(type)));
-                    type &= ~singleType;
-                    sp<DeviceDescriptor> dev = new DeviceDescriptor(singleType);
-                    devices.add(dev);
-                }
-            } else {
-                sp<DeviceDescriptor> deviceDesc =
-                        declaredDevices.getDeviceFromTagName(String8(devTag));
-                if (deviceDesc != 0) {
-                    devices.add(deviceDesc);
-                }
-            }
-        }
-        devTag = strtok(NULL, AudioParameter::valueListSeparator);
-    }
-    free(tagLiteral);
-}
-
-//static
-void ConfigParsingUtils::loadModuleGlobalConfig(cnode *root, const sp<HwModule> &module,
-                                                AudioPolicyConfig &config)
-{
-    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
-
-    if (node == NULL) {
-        return;
-    }
-    DeviceVector declaredDevices;
-    if (module != NULL) {
-        declaredDevices = module->getDeclaredDevices();
-    }
-
-    node = node->first_child;
-    while (node) {
-        if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
-            DeviceVector availableOutputDevices;
-            loadDevicesFromTag(node->value, availableOutputDevices, declaredDevices);
-            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
-                  availableOutputDevices.types());
-            config.addAvailableOutputDevices(availableOutputDevices);
-        } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
-            audio_devices_t device = AUDIO_DEVICE_NONE;
-            deviceFromString(node->value, device);
-            if (device != AUDIO_DEVICE_NONE) {
-                sp<DeviceDescriptor> defaultOutputDevice = new DeviceDescriptor(device);
-                config.setDefaultOutputDevice(defaultOutputDevice);
-                ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", defaultOutputDevice->type());
-            } else {
-                ALOGW("loadGlobalConfig() default device not specified");
-            }
-        } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
-            DeviceVector availableInputDevices;
-            loadDevicesFromTag(node->value, availableInputDevices, declaredDevices);
-            ALOGV("loadGlobalConfig() Available InputDevices %08x", availableInputDevices.types());
-            config.addAvailableInputDevices(availableInputDevices);
-        } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
-            uint32_t major, minor;
-            sscanf((char *)node->value, "%u.%u", &major, &minor);
-            module->setHalVersion(major, minor);
-            ALOGV("loadGlobalConfig() mHalVersion = major %u minor %u", major, minor);
-        }
-        node = node->next;
-    }
-}
-
-//static
-void ConfigParsingUtils::loadGlobalConfig(cnode *root, AudioPolicyConfig &config,
-                                          const sp<HwModule>& primaryModule)
-{
-    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
-
-    if (node == NULL) {
-        return;
-    }
-    node = node->first_child;
-    while (node) {
-        if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
-            bool speakerDrcEnabled;
-            if (utilities::convertTo<std::string, bool>(node->value, speakerDrcEnabled)) {
-                ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", speakerDrcEnabled);
-                config.setSpeakerDrcEnabled(speakerDrcEnabled);
-            }
-        }
-        node = node->next;
-    }
-    loadModuleGlobalConfig(root, primaryModule, config);
-}
-
-//static
-status_t ConfigParsingUtils::loadConfig(const char *path, AudioPolicyConfig &config)
-{
-    cnode *root;
-    char *data;
-
-    data = (char *)load_file(path, NULL);
-    if (data == NULL) {
-        return -ENODEV;
-    }
-    root = config_node("", "");
-    config_load(root, data);
-
-    HwModuleCollection hwModules;
-    loadHwModules(root, hwModules, config);
-
-    // legacy audio_policy.conf files have one global_configuration section, attached to primary.
-    loadGlobalConfig(root, config, hwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY));
-
-    config.setHwModules(hwModules);
-
-    config.setDefaultSurroundFormats();
-
-    config_free(root);
-    free(root);
-    free(data);
-
-    ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
-    config.setSource(path);
-
-    return NO_ERROR;
-}
-
-} // namespace android
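
The least obvious part of the legacy parser deleted above is the loop in loadDevicesFromTag() that splits a combined device mask into single-bit device types. For readers of this history, a minimal standalone restatement (plain integers instead of audio_devices_t, and a hypothetical mask value):

#include <cstdint>
#include <cstdio>

int main() {
    // The same walk the removed ConfigParsingUtils::loadDevicesFromTag() performed:
    // peel off the highest set bit until the mask is empty, re-attaching the
    // direction bit to every single-device type that comes out.
    const uint32_t kInBit = 0x80000000u;   // stands in for AUDIO_DEVICE_BIT_IN
    uint32_t type = kInBit | 0x0000000Eu;  // hypothetical input mask with bits 1..3 set
    const uint32_t inBit = type & kInBit;
    type &= ~kInBit;
    while (type) {
        // __builtin_clz is the GCC/Clang builtin the removed code relied on.
        const uint32_t singleType = inBit | (1u << (31 - __builtin_clz(type)));
        type &= ~singleType;
        std::printf("single device type %#x\n", singleType);  // 0x80000008, 0x80000004, 0x80000002
    }
    return 0;
}
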
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a9f7220..9e5f944 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -26,14 +26,25 @@
 namespace android {
 
 DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const String8 &tagName) :
+        DeviceDescriptor(type, FormatVector{}, tagName)
+{
+}
+
+DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const FormatVector &encodedFormats,
+        const String8 &tagName) :
     AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
               audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
                                              AUDIO_PORT_ROLE_SOURCE),
-    mAddress(""), mTagName(tagName), mDeviceType(type), mId(0)
+    mAddress(""), mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats), mId(0)
 {
     if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
         mAddress = String8("0");
     }
+    /* FIXME: read from APM config file */
+    if (type == AUDIO_DEVICE_OUT_HDMI) {
+        mEncodedFormats.add(AUDIO_FORMAT_AC3);
+        mEncodedFormats.add(AUDIO_FORMAT_IEC61937);
+    }
 }
 
 audio_port_handle_t DeviceDescriptor::getId() const
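
A brief usage sketch of the two constructors introduced above. The device choices, tag names and include paths are illustrative assumptions; only the constructor signatures and the HDMI default come from the diff.

#include <DeviceDescriptor.h>   // assumed: services/audiopolicy/common/managerdefinitions/include
#include <utils/String8.h>

using namespace android;

static void makeDevices() {
    // Explicit encoded-format list via the new constructor taking a FormatVector.
    FormatVector spdifFormats;
    spdifFormats.add(AUDIO_FORMAT_AC3);
    sp<DeviceDescriptor> spdif =
            new DeviceDescriptor(AUDIO_DEVICE_OUT_SPDIF, spdifFormats, String8("SPDIF Out"));

    // Delegating constructor: for AUDIO_DEVICE_OUT_HDMI the body above hardcodes
    // AC3 and IEC61937 until the APM config file can supply the encoded formats.
    sp<DeviceDescriptor> hdmi =
            new DeviceDescriptor(AUDIO_DEVICE_OUT_HDMI, String8("HDMI Out"));
    (void)spdif; (void)hdmi;
}
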
diff --git a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
deleted file mode 100644
index 6407a17..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::Gains"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-#include "Gains.h"
-#include <Volume.h>
-#include <math.h>
-#include <utils/String8.h>
-
-namespace android {
-
-// Enginedefault
-const VolumeCurvePoint
-Gains::sDefaultVolumeCurve[Volume::VOLCNT] = {
-    {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
-};
-
-
-const VolumeCurvePoint
-Gains::sDefaultMediaVolumeCurve[Volume::VOLCNT] = {
-    {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sExtMediaSystemVolumeCurve[Volume::VOLCNT] = {
-    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
-};
-
-const VolumeCurvePoint
-Gains::sSpeakerMediaVolumeCurve[Volume::VOLCNT] = {
-    {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT] = {
-    {1, -55.0f}, {20, -43.0f}, {86, -12.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sSpeakerSonificationVolumeCurve[Volume::VOLCNT] = {
-    {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT] = {
-    {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
-};
-
-// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
-// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
-// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
-// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
-
-const VolumeCurvePoint
-Gains::sDefaultSystemVolumeCurve[Volume::VOLCNT] = {
-    {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
-};
-
-const VolumeCurvePoint
-Gains::sDefaultSystemVolumeCurveDrc[Volume::VOLCNT] = {
-    {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
-};
-
-const VolumeCurvePoint
-Gains::sHeadsetSystemVolumeCurve[Volume::VOLCNT] = {
-    {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
-};
-
-const VolumeCurvePoint
-Gains::sDefaultVoiceVolumeCurve[Volume::VOLCNT] = {
-    {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sSpeakerVoiceVolumeCurve[Volume::VOLCNT] = {
-    {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sLinearVolumeCurve[Volume::VOLCNT] = {
-    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sSilentVolumeCurve[Volume::VOLCNT] = {
-    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
-};
-
-const VolumeCurvePoint
-Gains::sFullScaleVolumeCurve[Volume::VOLCNT] = {
-    {0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-Gains::sHearingAidVolumeCurve[Volume::VOLCNT] = {
-    {1, -128.0f}, {20, -80.0f}, {60, -40.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
-                                                  [DEVICE_CATEGORY_CNT] = {
-    { // AUDIO_STREAM_VOICE_CALL
-        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve     // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_SYSTEM
-        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve       // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_RING
-        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_MUSIC
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve     // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_ALARM
-        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_NOTIFICATION
-        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_BLUETOOTH_SCO
-        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve      // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_ENFORCED_AUDIBLE
-        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve       // DEVICE_CATEGORY_HEARING_AID
-    },
-    {  // AUDIO_STREAM_DTMF
-        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve       // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_TTS
-      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
-        Gains::sSilentVolumeCurve,    // DEVICE_CATEGORY_HEADSET
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sSilentVolumeCurve,    // DEVICE_CATEGORY_EARPIECE
-        Gains::sSilentVolumeCurve,    // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve  // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_ACCESSIBILITY
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sHearingAidVolumeCurve     // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_REROUTING
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_HEARING_AID
-    },
-    { // AUDIO_STREAM_PATCH
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
-        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_HEARING_AID
-    },
-};
-
-//static
-float Gains::volIndexToDb(const VolumeCurvePoint *curve, int indexMin, int indexMax, int indexInUi)
-{
-    // the volume index in the UI is relative to the min and max volume indices for this stream type
-    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex - curve[Volume::VOLMIN].mIndex;
-    int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);
-
-    // find what part of the curve this index volume belongs to, or if it's out of bounds
-    int segment = 0;
-    if (volIdx < curve[Volume::VOLMIN].mIndex) {         // out of bounds
-        return VOLUME_MIN_DB;
-    } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
-        segment = 0;
-    } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
-        segment = 1;
-    } else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
-        segment = 2;
-    } else {                                                               // out of bounds
-        return 0.0f;
-    }
-
-    // linear interpolation in the attenuation table in dB
-    float decibels = curve[segment].mDBAttenuation +
-            ((float)(volIdx - curve[segment].mIndex)) *
-                ( (curve[segment+1].mDBAttenuation -
-                        curve[segment].mDBAttenuation) /
-                    ((float)(curve[segment+1].mIndex -
-                            curve[segment].mIndex)) );
-
-    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
-            curve[segment].mIndex, volIdx,
-            curve[segment+1].mIndex,
-            curve[segment].mDBAttenuation,
-            decibels,
-            curve[segment+1].mDBAttenuation);
-
-    return decibels;
-}
-
-} // namespace android
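
With the curve tables above going away, a compact restatement of the interpolation they fed may help when reading this history. This is a standalone sketch with plain structs in place of the framework types (kMinDb stands in for VOLUME_MIN_DB); it is not meant as the replacement implementation.

#include <cstdio>

// Mirrors VolumeCurvePoint: a UI-index / dB-attenuation pair.
struct CurvePoint { int index; float dbAttenuation; };

// Piecewise-linear mapping from a UI volume index to a dB attenuation over a
// four-point curve, restating what the removed Gains::volIndexToDb() computed.
static float volIndexToDb(const CurvePoint curve[4], int indexMin, int indexMax, int indexInUi) {
    const float kMinDb = -758.0f;
    const int nbSteps = 1 + curve[3].index - curve[0].index;
    const int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);

    int segment = 0;
    if (volIdx < curve[0].index) {
        return kMinDb;                    // below the curve: treat as muted
    } else if (volIdx < curve[1].index) {
        segment = 0;
    } else if (volIdx < curve[2].index) {
        segment = 1;
    } else if (volIdx <= curve[3].index) {
        segment = 2;
    } else {
        return 0.0f;                      // above the curve: no attenuation
    }

    // Linear interpolation between the two points bounding volIdx.
    return curve[segment].dbAttenuation +
           (float)(volIdx - curve[segment].index) *
           (curve[segment + 1].dbAttenuation - curve[segment].dbAttenuation) /
           (float)(curve[segment + 1].index - curve[segment].index);
}

int main() {
    // Default media curve from the removed table: {1,-58} {20,-40} {60,-17} {100,0}.
    const CurvePoint media[4] = {{1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}};
    // A 0..15 stream at UI index 7 maps to internal index 46, i.e. the 20..60 segment.
    std::printf("%.1f dB\n", volIndexToDb(media, 0, 15, 7));
    return 0;
}
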
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
deleted file mode 100644
index c311a4f..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::Volumes"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-#include "StreamDescriptor.h"
-#include "Gains.h"
-#include "policy.h"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-namespace android {
-
-// --- StreamDescriptor class implementation
-
-StreamDescriptor::StreamDescriptor()
-    :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
-{
-    // Initialize the current stream's index to mIndexMax so volume isn't 0 in
-    // cases where the Java layer doesn't call into the audio policy service to
-    // set the default volume.
-    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, mIndexMax);
-}
-
-int StreamDescriptor::getVolumeIndex(audio_devices_t device) const
-{
-    device = Volume::getDeviceForVolume(device);
-    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
-    if (mIndexCur.indexOfKey(device) < 0) {
-        device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
-    }
-    return mIndexCur.valueFor(device);
-}
-
-void StreamDescriptor::clearCurrentVolumeIndex()
-{
-    mIndexCur.clear();
-}
-
-void StreamDescriptor::addCurrentVolumeIndex(audio_devices_t device, int index)
-{
-    mIndexCur.add(device, index);
-}
-
-void StreamDescriptor::setVolumeIndexMin(int volIndexMin)
-{
-    mIndexMin = volIndexMin;
-}
-
-void StreamDescriptor::setVolumeIndexMax(int volIndexMax)
-{
-    mIndexMax = volIndexMax;
-}
-
-void StreamDescriptor::setVolumeCurvePoint(device_category deviceCategory,
-                                           const VolumeCurvePoint *point)
-{
-    mVolumeCurve[deviceCategory] = point;
-}
-
-void StreamDescriptor::dump(String8 *dst) const
-{
-    dst->appendFormat("%s         %02d         %02d         ",
-             mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
-    for (size_t i = 0; i < mIndexCur.size(); i++) {
-        dst->appendFormat("%04x : %02d, ",
-                 mIndexCur.keyAt(i),
-                 mIndexCur.valueAt(i));
-    }
-    dst->append("\n");
-}
-
-StreamDescriptorCollection::StreamDescriptorCollection()
-{
-    for (size_t stream = 0 ; stream < AUDIO_STREAM_CNT; stream++) {
-        add(static_cast<audio_stream_type_t>(stream), StreamDescriptor());
-    }
-}
-
-bool StreamDescriptorCollection::canBeMuted(audio_stream_type_t stream)
-{
-    return valueAt(stream).canBeMuted();
-}
-
-void StreamDescriptorCollection::clearCurrentVolumeIndex(audio_stream_type_t stream)
-{
-    editValueAt(stream).clearCurrentVolumeIndex();
-}
-
-void StreamDescriptorCollection::addCurrentVolumeIndex(audio_stream_type_t stream,
-                                                       audio_devices_t device, int index)
-{
-    editValueAt(stream).addCurrentVolumeIndex(device, index);
-}
-
-void StreamDescriptorCollection::setVolumeCurvePoint(audio_stream_type_t stream,
-                                                     device_category deviceCategory,
-                                                     const VolumeCurvePoint *point)
-{
-    editValueAt(stream).setVolumeCurvePoint(deviceCategory, point);
-}
-
-const VolumeCurvePoint *StreamDescriptorCollection::getVolumeCurvePoint(audio_stream_type_t stream,
-                                                                        device_category deviceCategory) const
-{
-    return valueAt(stream).getVolumeCurvePoint(deviceCategory);
-}
-
-void StreamDescriptorCollection::setVolumeIndexMin(audio_stream_type_t stream,int volIndexMin)
-{
-    return editValueAt(stream).setVolumeIndexMin(volIndexMin);
-}
-
-void StreamDescriptorCollection::setVolumeIndexMax(audio_stream_type_t stream,int volIndexMax)
-{
-    return editValueAt(stream).setVolumeIndexMax(volIndexMax);
-}
-
-float StreamDescriptorCollection::volIndexToDb(audio_stream_type_t stream, device_category category,
-                                               int indexInUi) const
-{
-    const StreamDescriptor &streamDesc = valueAt(stream);
-    return Gains::volIndexToDb(streamDesc.getVolumeCurvePoint(category),
-                               streamDesc.getVolumeIndexMin(), streamDesc.getVolumeIndexMax(),
-                               indexInUi);
-}
-
-status_t StreamDescriptorCollection::initStreamVolume(audio_stream_type_t stream,
-                                                      int indexMin, int indexMax)
-{
-    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
-    if (indexMin < 0 || indexMin >= indexMax) {
-        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d",
-              stream , indexMin, indexMax);
-        return BAD_VALUE;
-    }
-    setVolumeIndexMin(stream, indexMin);
-    setVolumeIndexMax(stream, indexMax);
-    return NO_ERROR;
-}
-
-void StreamDescriptorCollection::initializeVolumeCurves(bool isSpeakerDrcEnabled)
-{
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
-            setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
-                                static_cast<device_category>(j),
-                                Gains::sVolumeProfiles[i][j]);
-        }
-    }
-
-    // Check availability of DRC on speaker path: if available, override some of the speaker curves
-    if (isSpeakerDrcEnabled) {
-        setVolumeCurvePoint(AUDIO_STREAM_SYSTEM, DEVICE_CATEGORY_SPEAKER,
-                            Gains::sDefaultSystemVolumeCurveDrc);
-        setVolumeCurvePoint(AUDIO_STREAM_RING, DEVICE_CATEGORY_SPEAKER,
-                            Gains::sSpeakerSonificationVolumeCurveDrc);
-        setVolumeCurvePoint(AUDIO_STREAM_ALARM, DEVICE_CATEGORY_SPEAKER,
-                            Gains::sSpeakerSonificationVolumeCurveDrc);
-        setVolumeCurvePoint(AUDIO_STREAM_NOTIFICATION, DEVICE_CATEGORY_SPEAKER,
-                            Gains::sSpeakerSonificationVolumeCurveDrc);
-        setVolumeCurvePoint(AUDIO_STREAM_MUSIC, DEVICE_CATEGORY_SPEAKER,
-                            Gains::sSpeakerMediaVolumeCurveDrc);
-        setVolumeCurvePoint(AUDIO_STREAM_ACCESSIBILITY, DEVICE_CATEGORY_SPEAKER,
-                            Gains::sSpeakerMediaVolumeCurveDrc);
-    }
-}
-
-void StreamDescriptorCollection::switchVolumeCurve(audio_stream_type_t streamSrc,
-                                                   audio_stream_type_t streamDst)
-{
-    for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
-        setVolumeCurvePoint(streamDst, static_cast<device_category>(j),
-                            Gains::sVolumeProfiles[streamSrc][j]);
-    }
-}
-
-void StreamDescriptorCollection::dump(String8 *dst) const
-{
-    dst->append("\nStreams dump:\n");
-    dst->append(
-             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
-    for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat(" %02zu      ", i);
-        valueAt(i).dump(dst);
-    }
-}
-
-} // namespace android
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index 1d037c3..42c52de 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -198,26 +198,10 @@
 
     <!-- End of Volume section -->
 
-    <?disabledUntilHalV4_1
-    <!-- Surround configuration -->
+    <!-- Surround Sound configuration -->
 
-    <surroundSound>
-      <!-- Each of the listed formats gets an entry in Surround Settings dialog.
-           There must be a corresponding Java ENCODING_... contant defined in AudioFormat.java,
-           and a display name defined in AudioFormat.toDisplayName. For the formats that don't
-           need a dedicated Surrond Settings dialog entry, a subformats list should be used. -->
-      <formats>
-        <format name="AUDIO_FORMAT_AC3" />
-        <format name="AUDIO_FORMAT_E_AC3" />
-        <format name="AUDIO_FORMAT_E_AC3_JOC" />
-        <format name="AUDIO_FORMAT_DOLBY_TRUEHD" />
-        <format name="AUDIO_FORMAT_DTS" />
-        <format name="AUDIO_FORMAT_DTS_HD" />
-        <format name="AUDIO_FORMAT_AAC_LC" subformats="AUDIO_FORMAT_AAC_HE_V1 AUDIO_FORMAT_AAC_HE_V2 AUDIO_FORMAT_AAC_ELD AUDIO_FORMAT_AAC_XHE" />
-      </formats>
-    </surroundSound>
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
 
-    <!-- End of Surround configuration -->
-    ?>
+    <!-- End of Surround Sound configuration -->
 
 </audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/msd_audio_policy_configuration.xml b/services/audiopolicy/config/msd_audio_policy_configuration.xml
index a811f5e..db17bc6 100644
--- a/services/audiopolicy/config/msd_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/msd_audio_policy_configuration.xml
@@ -32,6 +32,9 @@
             <profile name="" format="AUDIO_FORMAT_E_AC3"
                      samplingRates="32000,44100,48000"
                      channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+            <profile name="" format="AUDIO_FORMAT_E_AC3_JOC"
+                     samplingRates="32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
             <profile name="" format="AUDIO_FORMAT_AC4"
                      samplingRates="32000,44100,48000"
                      channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
@@ -50,6 +53,18 @@
        <devicePort tagName="MS12 Input" type="AUDIO_DEVICE_OUT_BUS"  role="sink">
            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+           <profile name="" format="AUDIO_FORMAT_AC3"
+                    samplingRates="32000,44100,48000"
+                    channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1"/>
+           <profile name="" format="AUDIO_FORMAT_E_AC3"
+                    samplingRates="32000,44100,48000"
+                    channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+            <profile name="" format="AUDIO_FORMAT_E_AC3_JOC"
+                     samplingRates="32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+           <profile name="" format="AUDIO_FORMAT_AC4"
+                    samplingRates="32000,44100,48000"
+                    channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
        </devicePort>
        <devicePort tagName="MS12 Output" type="AUDIO_DEVICE_IN_BUS"  role="source">
            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
diff --git a/services/audiopolicy/config/surround_sound_configuration_5_0.xml b/services/audiopolicy/config/surround_sound_configuration_5_0.xml
new file mode 100644
index 0000000..590a181
--- /dev/null
+++ b/services/audiopolicy/config/surround_sound_configuration_5_0.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<surroundSound>
+  <!-- Each of the listed formats gets an entry in the Surround Settings dialog on TV devices.
+       There must be a corresponding Java ENCODING_... constant defined in AudioFormat.java,
+       and a display name defined in AudioFormat.toDisplayName. For the formats that don't
+       need a dedicated Surround Settings dialog entry, a subformats list has to be used. -->
+  <formats>
+    <format name="AUDIO_FORMAT_AC3" />
+    <format name="AUDIO_FORMAT_E_AC3" />
+    <format name="AUDIO_FORMAT_E_AC3_JOC" />
+    <format name="AUDIO_FORMAT_DOLBY_TRUEHD" />
+    <format name="AUDIO_FORMAT_DTS" />
+    <format name="AUDIO_FORMAT_DTS_HD" />
+    <format name="AUDIO_FORMAT_AAC_LC" subformats="AUDIO_FORMAT_AAC_HE_V1 AUDIO_FORMAT_AAC_HE_V2 AUDIO_FORMAT_AAC_ELD AUDIO_FORMAT_AAC_XHE" />
+    <format name="AUDIO_FORMAT_AC4" />
+  </formats>
+</surroundSound>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 3909202..f07b797 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -36,6 +36,7 @@
 
 #include <inttypes.h>
 #include <math.h>
+#include <unordered_set>
 #include <vector>
 
 #include <AudioPolicyManagerInterface.h>
@@ -49,10 +50,6 @@
 #include <system/audio.h>
 #include <audio_policy_conf.h>
 #include "AudioPolicyManager.h"
-#ifndef USE_XML_AUDIO_POLICY_CONF
-#include <ConfigParsingUtils.h>
-#include <StreamDescriptor.h>
-#endif
 #include <Serializer.h>
 #include "TypeConverter.h"
 #include <policy.h>
@@ -220,7 +217,8 @@
             audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
             updateCallRouting(newDevice);
         }
-        const audio_devices_t msdOutDevice = getMsdAudioOutDeviceTypes();
+        const audio_devices_t msdOutDevice = getModuleDeviceTypes(
+                mAvailableOutputDevices, AUDIO_HARDWARE_MODULE_ID_MSD);
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
@@ -486,7 +484,7 @@
 
     audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(outputDevice, mOutputs);
-    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+    audio_io_handle_t output = selectOutput(outputs);
     // request to reuse existing output stream if one is already opened to reach the target device
     if (output != AUDIO_IO_HANDLE_NONE) {
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
@@ -529,6 +527,24 @@
     return deviceList.itemAt(0);
 }
 
+audio_devices_t AudioPolicyManager::getModuleDeviceTypes(
+        const DeviceVector& devices, const char *moduleId) const {
+    sp<HwModule> mod = mHwModules.getModuleFromName(moduleId);
+    return mod != 0 ? devices.getDeviceTypesFromHwModule(mod->getHandle()) : AUDIO_DEVICE_NONE;
+}
+
+bool AudioPolicyManager::isDeviceOfModule(
+        const sp<DeviceDescriptor>& devDesc, const char *moduleId) const {
+    sp<HwModule> module = mHwModules.getModuleFromName(moduleId);
+    if (module != 0) {
+        return mAvailableOutputDevices.getDevicesFromHwModule(module->getHandle())
+                .indexOf(devDesc) != NAME_NOT_FOUND
+                || mAvailableInputDevices.getDevicesFromHwModule(module->getHandle())
+                .indexOf(devDesc) != NAME_NOT_FOUND;
+    }
+    return false;
+}
+
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
 {
     ALOGV("setPhoneState() state %d", state);
@@ -696,22 +712,25 @@
     ALOGV("setSystemProperty() property %s, value %s", property, value);
 }
 
-// Find a direct output profile compatible with the parameters passed, even if the input flags do
-// not explicitly request a direct output
-sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
-                                                               audio_devices_t device,
-                                                               uint32_t samplingRate,
-                                                               audio_format_t format,
-                                                               audio_channel_mask_t channelMask,
-                                                               audio_output_flags_t flags)
+// Find an output profile compatible with the parameters passed. When "directOnly" is set, restrict
+// search to profiles for direct outputs.
+sp<IOProfile> AudioPolicyManager::getProfileForOutput(
+                                                   audio_devices_t device,
+                                                   uint32_t samplingRate,
+                                                   audio_format_t format,
+                                                   audio_channel_mask_t channelMask,
+                                                   audio_output_flags_t flags,
+                                                   bool directOnly)
 {
-    // only retain flags that will drive the direct output profile selection
-    // if explicitly requested
-    static const uint32_t kRelevantFlags =
-            (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
-             AUDIO_OUTPUT_FLAG_VOIP_RX);
-    flags =
-        (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+    if (directOnly) {
+        // only retain flags that will drive the direct output profile selection
+        // if explicitly requested
+        static const uint32_t kRelevantFlags =
+                (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
+                 AUDIO_OUTPUT_FLAG_VOIP_RX);
+        flags =
+            (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
 
     sp<IOProfile> profile;
 
@@ -728,7 +747,9 @@
             if ((mAvailableOutputDevices.types() & curProfile->getSupportedDevicesType()) == 0) {
                 continue;
             }
-            // if several profiles are compatible, give priority to one with offload capability
+            if (!directOnly) return curProfile;
+            // when searching for direct outputs, if several profiles are compatible, give priority
+            // to one with offload capability
             if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
                 continue;
             }
@@ -753,7 +774,7 @@
     // and AudioSystem::getOutputSamplingRate().
 
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
-    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+    audio_io_handle_t output = selectOutput(outputs);
 
     ALOGV("getOutput() stream %d selected device %08x, output %d", stream, device, output);
     return output;
@@ -774,7 +795,8 @@
     routing_strategy strategy;
     audio_devices_t device;
     const audio_port_handle_t requestedDeviceId = *selectedDeviceId;
-    audio_devices_t msdDevice = getMsdAudioOutDeviceTypes();
+    audio_devices_t msdDevice =
+            getModuleDeviceTypes(mAvailableOutputDevices, AUDIO_HARDWARE_MODULE_ID_MSD);
 
     // The supplied portId must be AUDIO_PORT_HANDLE_NONE
     if (*portId != AUDIO_PORT_HANDLE_NONE) {
@@ -845,8 +867,9 @@
     // chosen by the engine if not.
     // FIXME: provide a more generic approach which is not device specific and move this back
     // to getOutputForDevice.
+    // TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
     if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
-        *stream == AUDIO_STREAM_MUSIC &&
+        (*stream == AUDIO_STREAM_MUSIC || attributes.usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
         audio_is_linear_pcm(config->format) &&
         isInCall()) {
         if (requestedDeviceId != AUDIO_PORT_HANDLE_NONE) {
@@ -935,7 +958,8 @@
     if (stream == AUDIO_STREAM_TTS) {
         *flags = AUDIO_OUTPUT_FLAG_TTS;
     } else if (stream == AUDIO_STREAM_VOICE_CALL &&
-               audio_is_linear_pcm(config->format)) {
+               audio_is_linear_pcm(config->format) &&
+               (*flags & AUDIO_OUTPUT_FLAG_INCALL_MUSIC) == 0) {
         *flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
                                        AUDIO_OUTPUT_FLAG_DIRECT);
         ALOGV("Set VoIP and Direct output flags for PCM format");
@@ -961,11 +985,12 @@
 
     if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
             !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
-        profile = getProfileForDirectOutput(device,
-                                           config->sample_rate,
-                                           config->format,
-                                           config->channel_mask,
-                                           (audio_output_flags_t)*flags);
+        profile = getProfileForOutput(device,
+                                   config->sample_rate,
+                                   config->format,
+                                   config->channel_mask,
+                                   (audio_output_flags_t)*flags,
+                                   true /* directOnly */);
     }
 
     if (profile != 0) {
@@ -1066,7 +1091,8 @@
 
         // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
         *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
-        output = selectOutput(outputs, *flags, config->format);
+        output = selectOutput(outputs, *flags, config->format,
+                config->channel_mask, config->sample_rate);
     }
     ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
             "sampling rate %d, format %#x, channels %#x, flags %#x",
@@ -1085,14 +1111,6 @@
     return 0;
 }
 
-audio_devices_t AudioPolicyManager::getMsdAudioOutDeviceTypes() const {
-    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
-    if (msdModule != 0) {
-        return mAvailableOutputDevices.getDeviceTypesFromHwModule(msdModule->getHandle());
-    }
-    return AUDIO_DEVICE_NONE;
-}
-
 const AudioPatchCollection AudioPolicyManager::getMsdPatches() const {
     AudioPatchCollection msdPatches;
     sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
@@ -1235,15 +1253,18 @@
 
 audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                                        audio_output_flags_t flags,
-                                                       audio_format_t format)
+                                                       audio_format_t format,
+                                                       audio_channel_mask_t channelMask,
+                                                       uint32_t samplingRate)
 {
     // select one output among several that provide a path to a particular device or set of
     // devices (the list was previously build by getOutputsForDevice()).
     // The priority is as follows:
-    // 1: the output with the highest number of requested policy flags
-    // 2: the output with the bit depth the closest to the requested one
-    // 3: the primary output
-    // 4: the first output in the list
+    // 1: the output supporting haptic playback when haptic playback is requested
+    // 2: the output with the highest number of requested policy flags
+    // 3: the output with the bit depth the closest to the requested one
+    // 4: the primary output
+    // 5: the first output in the list
 
     if (outputs.size() == 0) {
         return AUDIO_IO_HANDLE_NONE;
@@ -1253,6 +1274,8 @@
     }
 
     int maxCommonFlags = 0;
+    const size_t hapticChannelCount = audio_channel_count_from_out_mask(
+            channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
     audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
     audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
     audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
@@ -1265,6 +1288,24 @@
             if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
                 continue;
             }
+            // If a haptic channel is specified, use the haptic output if present.
+            // When using the haptic output, the same audio format and sample rate are required.
+            if (hapticChannelCount > 0) {
+                // If a haptic channel is specified, use the first output that
+                // supports haptic playback.
+                if (audio_channel_count_from_out_mask(
+                        outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) >= hapticChannelCount
+                        && format == outputDesc->mFormat
+                        && samplingRate == outputDesc->mSamplingRate) {
+                    return output;
+                }
+            } else {
+                // When no haptic channel is specified, skip haptic outputs.
+                if (outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
+                    continue;
+                }
+            }
+
             // if a valid format is specified, skip output if not compatible
             if (format != AUDIO_FORMAT_INVALID) {
                 if (!audio_is_linear_pcm(format)) {
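
The haptic matching rule added above boils down to a small predicate. The helper below is a standalone restatement (not part of the patch) that keeps the same "at least as many haptic channels, exact format and exact sample rate" requirement; it assumes the header defining audio_channel_count_from_out_mask() is on the include path.

#include <system/audio.h>

// True when an output can carry a track that requests haptic channels.
static bool outputFitsHapticRequest(audio_channel_mask_t requestedMask,
                                    audio_format_t requestedFormat,
                                    uint32_t requestedRate,
                                    audio_channel_mask_t outputMask,
                                    audio_format_t outputFormat,
                                    uint32_t outputRate) {
    const size_t wanted = audio_channel_count_from_out_mask(
            requestedMask & AUDIO_CHANNEL_HAPTIC_ALL);
    if (wanted == 0) {
        return false;  // no haptic channels requested; normal selection applies
    }
    const size_t provided = audio_channel_count_from_out_mask(
            outputMask & AUDIO_CHANNEL_HAPTIC_ALL);
    return provided >= wanted
            && requestedFormat == outputFormat
            && requestedRate == outputRate;
}
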
@@ -1878,7 +1919,39 @@
     }
 
     if (!profile->canOpenNewIo()) {
-        return AUDIO_IO_HANDLE_NONE;
+        for (size_t i = 0; i < mInputs.size(); ) {
+            sp <AudioInputDescriptor> desc = mInputs.valueAt(i);
+            if (desc->mProfile != profile) {
+                i++;
+                continue;
+            }
+            // For sound trigger clients, reuse the input if another sound trigger
+            // client uses it on the same session.
+            // Otherwise, reuse the input only if it has an active client whose app
+            // is not in the IDLE state.
+            RecordClientVector clients = desc->clientsList();
+            bool doClose = false;
+            for (const auto& client : clients) {
+                if (isSoundTrigger != client->isSoundTrigger()) {
+                    continue;
+                }
+                if (client->isSoundTrigger()) {
+                    if (session == client->session()) {
+                        return desc->mIoHandle;
+                    }
+                    continue;
+                }
+                if (client->active() && client->appState() != APP_STATE_IDLE) {
+                    return desc->mIoHandle;
+                }
+                doClose = true;
+            }
+            if (doClose) {
+                closeInput(desc->mIoHandle);
+            } else {
+                i++;
+            }
+        }
     }
 
     sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
@@ -1919,55 +1991,8 @@
     return input;
 }
 
-//static
-bool AudioPolicyManager::isConcurrentSource(audio_source_t source)
+status_t AudioPolicyManager::startInput(audio_port_handle_t portId)
 {
-    return (source == AUDIO_SOURCE_HOTWORD) ||
-            (source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
-            (source == AUDIO_SOURCE_FM_TUNER);
-}
-
-// FIXME: remove when concurrent capture is ready. This is a hack to work around bug b/63083537.
-bool AudioPolicyManager::soundTriggerSupportsConcurrentCapture() {
-    if (!mHasComputedSoundTriggerSupportsConcurrentCapture) {
-        bool soundTriggerSupportsConcurrentCapture = false;
-        unsigned int numModules = 0;
-        struct sound_trigger_module_descriptor* nModules = NULL;
-
-        status_t status = SoundTrigger::listModules(nModules, &numModules);
-        if (status == NO_ERROR && numModules != 0) {
-            nModules = (struct sound_trigger_module_descriptor*) calloc(
-                    numModules, sizeof(struct sound_trigger_module_descriptor));
-            if (nModules == NULL) {
-              // We failed to malloc the buffer, so just say no for now, and hope that we have more
-              // ram the next time this function is called.
-              ALOGE("Failed to allocate buffer for module descriptors");
-              return false;
-            }
-
-            status = SoundTrigger::listModules(nModules, &numModules);
-            if (status == NO_ERROR) {
-                soundTriggerSupportsConcurrentCapture = true;
-                for (size_t i = 0; i < numModules; ++i) {
-                    soundTriggerSupportsConcurrentCapture &=
-                            nModules[i].properties.concurrent_capture;
-                }
-            }
-            free(nModules);
-        }
-        mSoundTriggerSupportsConcurrentCapture = soundTriggerSupportsConcurrentCapture;
-        mHasComputedSoundTriggerSupportsConcurrentCapture = true;
-    }
-    return mSoundTriggerSupportsConcurrentCapture;
-}
-
-
-status_t AudioPolicyManager::startInput(audio_port_handle_t portId,
-                                        bool silenced,
-                                        concurrency_type__mask_t *concurrency)
-{
-    *concurrency = API_INPUT_CONCURRENCY_NONE;
-
     ALOGV("%s portId %d", __FUNCTION__, portId);
 
     sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
@@ -1984,106 +2009,16 @@
 
     audio_session_t session = client->session();
 
-    ALOGV("%s input:%d, session:%d, silenced:%d, concurrency:%d)",
-        __FUNCTION__, input, session, silenced, *concurrency);
+    ALOGV("%s input:%d, session:%d", __FUNCTION__, input, session);
 
-    if (!is_virtual_input_device(inputDesc->mDevice)) {
-        if (mCallTxPatch != 0 &&
-            inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
-            ALOGW("startInput(%d) failed: call in progress", input);
-            *concurrency |= API_INPUT_CONCURRENCY_CALL;
-            return INVALID_OPERATION;
-        }
+    Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs();
 
-        Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs();
-
-        // If a UID is idle and records silence and another not silenced recording starts
-        // from another UID (idle or active) we stop the current idle UID recording in
-        // favor of the new one - "There can be only one" TM
-        if (!silenced) {
-            for (const auto& activeDesc : activeInputs) {
-                if ((activeDesc->getAudioPort()->getFlags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
-                        activeDesc->getId() == inputDesc->getId()) {
-                     continue;
-                }
-
-                RecordClientVector activeClients = activeDesc->clientsList(true /*activeOnly*/);
-                for (const auto& activeClient : activeClients) {
-                    if (activeClient->isSilenced()) {
-                        closeClient(activeClient->portId());
-                        ALOGV("%s client %d stopping silenced client %d", __FUNCTION__,
-                              portId, activeClient->portId());
-                        activeInputs = mInputs.getActiveInputs();
-                    }
-                }
-            }
-        }
-
-        for (const auto& activeDesc : activeInputs) {
-            if ((client->flags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
-                    activeDesc->getId() == inputDesc->getId()) {
-                continue;
-            }
-
-            audio_source_t activeSource = activeDesc->inputSource(true);
-            if (client->source() == AUDIO_SOURCE_HOTWORD) {
-                if (activeSource == AUDIO_SOURCE_HOTWORD) {
-                    if (activeDesc->hasPreemptedSession(session)) {
-                        ALOGW("%s input %d failed for HOTWORD: "
-                                "other input %d already started for HOTWORD", __FUNCTION__,
-                              input, activeDesc->mIoHandle);
-                        *concurrency |= API_INPUT_CONCURRENCY_HOTWORD;
-                        return INVALID_OPERATION;
-                    }
-                } else {
-                    ALOGV("%s input %d failed for HOTWORD: other input %d already started",
-                        __FUNCTION__, input, activeDesc->mIoHandle);
-                    *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
-                    return INVALID_OPERATION;
-                }
-            } else {
-                if (activeSource != AUDIO_SOURCE_HOTWORD) {
-                    ALOGW("%s input %d failed: other input %d already started", __FUNCTION__,
-                          input, activeDesc->mIoHandle);
-                    *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
-                    return INVALID_OPERATION;
-                }
-            }
-        }
-
-        // We only need to check if the sound trigger session supports concurrent capture if the
-        // input is also a sound trigger input. Otherwise, we should preempt any hotword stream
-        // that's running.
-        const bool allowConcurrentWithSoundTrigger =
-            inputDesc->isSoundTrigger() ? soundTriggerSupportsConcurrentCapture() : false;
-
-        // if capture is allowed, preempt currently active HOTWORD captures
-        for (const auto& activeDesc : activeInputs) {
-            if (allowConcurrentWithSoundTrigger && activeDesc->isSoundTrigger()) {
-                continue;
-            }
-            RecordClientVector activeHotwordClients =
-                activeDesc->clientsList(true, AUDIO_SOURCE_HOTWORD);
-            if (activeHotwordClients.size() > 0) {
-                SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
-
-                for (const auto& activeClient : activeHotwordClients) {
-                    *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
-                    sessions.add(activeClient->session());
-                    closeClient(activeClient->portId());
-                    ALOGV("%s input %d for HOTWORD preempting HOTWORD input %d", __FUNCTION__,
-                          input, activeDesc->mIoHandle);
-                }
-
-                inputDesc->setPreemptedSessions(sessions);
-            }
-        }
+    status_t status = inputDesc->start();
+    if (status != NO_ERROR) {
+        return status;
     }
 
-    // Make sure we start with the correct silence state
-    client->setSilenced(silenced);
-
-    // increment activity count before calling getNewInputDevice() below as only active sessions
+  // increment activity count before calling getNewInputDevice() below as only active sessions
     // are considered for device selection
     inputDesc->setClientActive(client, true);
 
@@ -2092,12 +2027,6 @@
     audio_devices_t device = getNewInputDevice(inputDesc);
     setInputDevice(input, device, true /* force */);
 
-    status_t status = inputDesc->start();
-    if (status != NO_ERROR) {
-        inputDesc->setClientActive(client, false);
-        return status;
-    }
-
     if (inputDesc->activeCount()  == 1) {
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((inputDesc->mPolicyMix != NULL)
@@ -2316,7 +2245,7 @@
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
+        audio_devices_t curDevice = desc->device();
         for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
             if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
                 continue;
@@ -2334,7 +2263,7 @@
             bool applyVolume;
             if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
                 curStreamDevice |= device;
-                applyVolume = (curDevice & curStreamDevice) != 0;
+                applyVolume = (Volume::getDeviceForVolume(curDevice) & curStreamDevice) != 0;
             } else {
                 applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
                         stream, curStreamDevice);
@@ -2670,6 +2599,19 @@
     return res;
 }
 
+void AudioPolicyManager::dumpManualSurroundFormats(String8 *dst) const
+{
+    size_t i = 0;
+    constexpr size_t audioFormatPrefixLen = sizeof("AUDIO_FORMAT_");
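+    // Note: sizeof() includes the terminating NUL, so the "- 1" below skips
+    // exactly strlen("AUDIO_FORMAT_") characters when stripping the prefix.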
+    for (const auto& fmt : mManualSurroundFormats) {
+        if (i++ != 0) dst->append(", ");
+        std::string sfmt;
+        FormatConverter::toString(fmt, sfmt);
+        dst->append(sfmt.size() >= audioFormatPrefixLen ?
+                sfmt.c_str() + audioFormatPrefixLen - 1 : sfmt.c_str());
+    }
+}
+
 void AudioPolicyManager::dump(String8 *dst) const
 {
     dst->appendFormat("\nAudioPolicyManager Dump: %p\n", this);
@@ -2683,8 +2625,15 @@
         "HDMI system audio", "encoded surround output", "vibrate ringing" };
     for (audio_policy_force_use_t i = AUDIO_POLICY_FORCE_FOR_COMMUNICATION;
          i < AUDIO_POLICY_FORCE_USE_CNT; i = (audio_policy_force_use_t)((int)i + 1)) {
-        dst->appendFormat(" Force use for %s: %d\n",
-                forceUses[i], mEngine->getForceUse(i));
+        audio_policy_forced_cfg_t forceUseValue = mEngine->getForceUse(i);
+        dst->appendFormat(" Force use for %s: %d", forceUses[i], forceUseValue);
+        if (i == AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND &&
+                forceUseValue == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
+            dst->append(" (MANUAL: ");
+            dumpManualSurroundFormats(dst);
+            dst->append(")");
+        }
+        dst->append("\n");
     }
     dst->appendFormat(" TTS output %savailable\n", mTtsOutputAvailable ? "" : "not ");
     dst->appendFormat(" Master mono: %s\n", mMasterMono ? "on" : "off");
@@ -2699,17 +2648,6 @@
     mAudioPatches.dump(dst);
     mPolicyMixes.dump(dst);
     mAudioSources.dump(dst);
-    if (!mSurroundFormats.empty()) {
-        dst->append("\nEnabled Surround Formats:\n");
-        size_t i = 0;
-        for (const auto& fmt : mSurroundFormats) {
-            dst->append(i++ == 0 ? "  " : ", ");
-            std::string sfmt;
-            FormatConverter::toString(fmt, sfmt);
-            dst->append(sfmt.c_str());
-        }
-        dst->append("\n");
-    }
 }
 
 status_t AudioPolicyManager::dump(int fd)
@@ -2783,15 +2721,34 @@
 
     // See if there is a profile to support this.
     // AUDIO_DEVICE_NONE
-    sp<IOProfile> profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */,
+    sp<IOProfile> profile = getProfileForOutput(AUDIO_DEVICE_NONE /*ignore device */,
                                             offloadInfo.sample_rate,
                                             offloadInfo.format,
                                             offloadInfo.channel_mask,
-                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,
+                                            true /* directOnly */);
     ALOGV("isOffloadSupported() profile %sfound", profile != 0 ? "" : "NOT ");
     return (profile != 0);
 }
 
+bool AudioPolicyManager::isDirectOutputSupported(const audio_config_base_t& config,
+                                                 const audio_attributes_t& attributes) {
+    audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
+    audio_attributes_flags_to_audio_output_flags(attributes.flags, output_flags);
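+    // Map the attribute flags onto equivalent output flags so the profile lookup
+    // below only matches outputs that can honor them.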
+    sp<IOProfile> profile = getProfileForOutput(AUDIO_DEVICE_NONE /*ignore device */,
+                                            config.sample_rate,
+                                            config.format,
+                                            config.channel_mask,
+                                            output_flags,
+                                            true /* directOnly */);
+    ALOGV("%s() profile %sfound with name: %s, "
+        "sample rate: %u, format: 0x%x, channel_mask: 0x%x, output flags: 0x%x",
+        __FUNCTION__, profile != 0 ? "" : "NOT ",
+        (profile != 0 ? profile->getTagName().string() : "null"),
+        config.sample_rate, config.format, config.channel_mask, output_flags);
+    return (profile != 0);
+}
+
 status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
@@ -3103,9 +3060,7 @@
                                             getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
                     // if the sink device is reachable via an opened output stream, request to go via
                     // this output stream by adding a second source to the patch description
-                    audio_io_handle_t output = selectOutput(outputs,
-                                                            AUDIO_OUTPUT_FLAG_NONE,
-                                                            AUDIO_FORMAT_INVALID);
+                    audio_io_handle_t output = selectOutput(outputs);
                     if (output != AUDIO_IO_HANDLE_NONE) {
                         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
                         if (outputDesc->isDuplicated()) {
@@ -3346,7 +3301,7 @@
     SortedVector<audio_io_handle_t> inputsToClose;
     for (size_t i = 0; i < mInputs.size(); i++) {
         sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
-        if (affectedSources.indexOf(inputDesc->inputSource()) >= 0) {
+        if (affectedSources.indexOf(inputDesc->source()) >= 0) {
             inputsToClose.add(inputDesc->mIoHandle);
         }
     }
@@ -3449,8 +3404,7 @@
         //   create Hwoutput and add to mHwOutputs
     } else {
         SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(sinkDevice, mOutputs);
-        audio_io_handle_t output =
-                selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+        audio_io_handle_t output = selectOutput(outputs);
         if (output == AUDIO_IO_HANDLE_NONE) {
             ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevice);
             return INVALID_OPERATION;
@@ -3569,63 +3523,56 @@
             (surroundFormats == NULL || surroundFormatsEnabled == NULL))) {
         return BAD_VALUE;
     }
-    ALOGV("getSurroundFormats() numSurroundFormats %d surroundFormats %p surroundFormatsEnabled %p"
-            " reported %d", *numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
-
-    // Only return value if there is HDMI output.
-    if ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_HDMI) == 0) {
-        return INVALID_OPERATION;
-    }
+    ALOGV("%s() numSurroundFormats %d surroundFormats %p surroundFormatsEnabled %p reported %d",
+            __func__, *numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
 
     size_t formatsWritten = 0;
     size_t formatsMax = *numSurroundFormats;
-    *numSurroundFormats = 0;
-    std::unordered_set<audio_format_t> formats;
+    std::unordered_set<audio_format_t> formats; // Uses primary surround formats only
     if (reported) {
-        // Return formats from HDMI profiles, that have already been resolved by
+        // Return formats from all device profiles that have already been resolved by
         // checkOutputsForDevice().
-        DeviceVector hdmiOutputDevs = mAvailableOutputDevices.getDevicesFromTypeMask(
-              AUDIO_DEVICE_OUT_HDMI);
-        for (size_t i = 0; i < hdmiOutputDevs.size(); i++) {
-             FormatVector supportedFormats =
-                 hdmiOutputDevs[i]->getAudioPort()->getAudioProfiles().getSupportedFormats();
-             for (size_t j = 0; j < supportedFormats.size(); j++) {
-                 if (mConfig.getSurroundFormats().count(supportedFormats[j]) != 0) {
-                     formats.insert(supportedFormats[j]);
-                 } else {
-                     for (const auto& pair : mConfig.getSurroundFormats()) {
-                         if (pair.second.count(supportedFormats[j]) != 0) {
-                             formats.insert(pair.first);
-                             break;
-                         }
-                     }
-                 }
-             }
+        for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
+            sp<DeviceDescriptor> device = mAvailableOutputDevices[i];
+            FormatVector supportedFormats =
+                    device->getAudioPort()->getAudioProfiles().getSupportedFormats();
+            for (size_t j = 0; j < supportedFormats.size(); j++) {
+                if (mConfig.getSurroundFormats().count(supportedFormats[j]) != 0) {
+                    formats.insert(supportedFormats[j]);
+                } else {
+                    for (const auto& pair : mConfig.getSurroundFormats()) {
+                        if (pair.second.count(supportedFormats[j]) != 0) {
+                            formats.insert(pair.first);
+                            break;
+                        }
+                    }
+                }
+            }
         }
     } else {
         for (const auto& pair : mConfig.getSurroundFormats()) {
             formats.insert(pair.first);
         }
     }
+    *numSurroundFormats = formats.size();
+    audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
     for (const auto& format: formats) {
         if (formatsWritten < formatsMax) {
             surroundFormats[formatsWritten] = format;
-            bool formatEnabled = false;
-            if (mConfig.getSurroundFormats().count(format) == 0) {
-                // Check sub-formats
-                for (const auto& pair : mConfig.getSurroundFormats()) {
-                    for (const auto& subformat : pair.second) {
-                        formatEnabled = mSurroundFormats.count(subformat) != 0;
-                        if (formatEnabled) break;
-                    }
-                    if (formatEnabled) break;
-                }
-            } else {
-                formatEnabled = mSurroundFormats.count(format) != 0;
+            bool formatEnabled = true;
+            switch (forceUse) {
+                case AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL:
+                    formatEnabled = mManualSurroundFormats.count(format) != 0;
+                    break;
+                case AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER:
+                    formatEnabled = false;
+                    break;
+                default: // AUTO or ALWAYS => true
+                    break;
             }
             surroundFormatsEnabled[formatsWritten++] = formatEnabled;
         }
-        (*numSurroundFormats)++;
     }
     return NO_ERROR;
 }
@@ -3633,41 +3580,32 @@
 status_t AudioPolicyManager::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
 {
     ALOGV("%s() format 0x%X enabled %d", __func__, audioFormat, enabled);
-    // Check if audio format is a surround formats.
     const auto& formatIter = mConfig.getSurroundFormats().find(audioFormat);
     if (formatIter == mConfig.getSurroundFormats().end()) {
         ALOGW("%s() format 0x%X is not a known surround format", __func__, audioFormat);
         return BAD_VALUE;
     }
 
-    // Should only be called when MANUAL.
-    audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
-                AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
-    if (forceUse != AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
+    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND) !=
+            AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
         ALOGW("%s() not in manual mode for surround sound format selection", __func__);
         return INVALID_OPERATION;
     }
 
-    if ((mSurroundFormats.count(audioFormat) != 0) == enabled) {
+    if ((mManualSurroundFormats.count(audioFormat) != 0) == enabled) {
         return NO_ERROR;
     }
 
-    // The operation is valid only when there is HDMI output available.
-    if ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_HDMI) == 0) {
-        ALOGW("%s() no HDMI out devices found", __func__);
-        return INVALID_OPERATION;
-    }
-
-    std::unordered_set<audio_format_t> surroundFormatsBackup(mSurroundFormats);
+    std::unordered_set<audio_format_t> surroundFormatsBackup(mManualSurroundFormats);
     if (enabled) {
-        mSurroundFormats.insert(audioFormat);
+        mManualSurroundFormats.insert(audioFormat);
         for (const auto& subFormat : formatIter->second) {
-            mSurroundFormats.insert(subFormat);
+            mManualSurroundFormats.insert(subFormat);
         }
     } else {
-        mSurroundFormats.erase(audioFormat);
+        mManualSurroundFormats.erase(audioFormat);
         for (const auto& subFormat : formatIter->second) {
-            mSurroundFormats.erase(subFormat);
+            mManualSurroundFormats.erase(subFormat);
         }
     }
 
@@ -3692,6 +3630,7 @@
                                              name.c_str());
         profileUpdated |= (status == NO_ERROR);
     }
+    // FIXME: Why are we doing this for input HDMI devices if we don't augment their reported formats?
     DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromTypeMask(
                 AUDIO_DEVICE_IN_HDMI);
     for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
@@ -3714,7 +3653,7 @@
 
     if (!profileUpdated) {
         ALOGW("%s() no audio profiles updated, undoing surround formats change", __func__);
-        mSurroundFormats = std::move(surroundFormatsBackup);
+        mManualSurroundFormats = std::move(surroundFormatsBackup);
     }
 
     return profileUpdated ? NO_ERROR : INVALID_OPERATION;
@@ -3723,21 +3662,37 @@
 void AudioPolicyManager::setAppState(uid_t uid, app_state_t state)
 {
     Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
-    bool silenced = state == APP_STATE_IDLE;
 
-    ALOGV("AudioPolicyManager:setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
+    ALOGV("%s(uid:%d, state:%d)", __func__, uid, state);
 
     for (size_t i = 0; i < activeInputs.size(); i++) {
         sp<AudioInputDescriptor> activeDesc = activeInputs[i];
         RecordClientVector clients = activeDesc->clientsList(true /*activeOnly*/);
         for (const auto& client : clients) {
             if (uid == client->uid()) {
-                client->setSilenced(silenced);
+                client->setAppState(state);
             }
         }
     }
 }
 
+bool AudioPolicyManager::isHapticPlaybackSupported()
+{
+    for (const auto& hwModule : mHwModules) {
+        const OutputProfileCollection &outputProfiles = hwModule->getOutputProfiles();
+        for (const auto &outProfile : outputProfiles) {
+            struct audio_port audioPort;
+            outProfile->toAudioPort(&audioPort);
+            for (size_t i = 0; i < audioPort.num_channel_masks; i++) {
+                if (audioPort.channel_masks[i] & AUDIO_CHANNEL_HAPTIC_ALL) {
+                    return true;
+                }
+            }
+        }
+    }
+    return false;
+}
+
 status_t AudioPolicyManager::disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
     ALOGV("%s port Id %d", __FUNCTION__, sourceDesc->portId());
@@ -3794,7 +3749,6 @@
     return mAudioPortGeneration++;
 }
 
-#ifdef USE_XML_AUDIO_POLICY_CONF
 // Treblized audio policy xml config will be located in /odm/etc or /vendor/etc.
 static const char *kConfigLocationList[] =
         {"/odm/etc", "/vendor/etc", "/system/etc"};
@@ -3826,7 +3780,6 @@
     }
     return ret;
 }
-#endif
 
 AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
                                        bool /*forTesting*/)
@@ -3835,23 +3788,16 @@
     mpClientInterface(clientInterface),
     mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
     mA2dpSuspended(false),
-#ifdef USE_XML_AUDIO_POLICY_CONF
     mVolumeCurves(new VolumeCurvesCollection()),
     mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
             mDefaultOutputDevice, static_cast<VolumeCurvesCollection*>(mVolumeCurves.get())),
-#else
-    mVolumeCurves(new StreamDescriptorCollection()),
-    mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
-            mDefaultOutputDevice),
-#endif
     mAudioPortGeneration(1),
     mBeaconMuteRefCount(0),
     mBeaconPlayingRefCount(0),
     mBeaconMuted(false),
     mTtsOutputAvailable(false),
     mMasterMono(false),
-    mMusicEffectOutput(AUDIO_IO_HANDLE_NONE),
-    mHasComputedSoundTriggerSupportsConcurrentCapture(false)
+    mMusicEffectOutput(AUDIO_IO_HANDLE_NONE)
 {
 }
 
@@ -3862,13 +3808,16 @@
     initialize();
 }
 
-void AudioPolicyManager::loadConfig() {
-#ifdef USE_XML_AUDIO_POLICY_CONF
-    if (deserializeAudioPolicyXmlConfig(getConfig()) != NO_ERROR) {
-#else
-    if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, getConfig()) != NO_ERROR)
-           && (ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, getConfig()) != NO_ERROR)) {
+//  This check is to catch any legacy platform updating to Q without having
+//  switched to XML since its deprecation in O.
+// TODO: after the Q release, remove this check and the flag, as XML is now the
+//        only option and all legacy platforms should have transitioned to XML.
+#ifndef USE_XML_AUDIO_POLICY_CONF
+#error Audio policy no longer supports legacy .conf configuration format
 #endif
+
+void AudioPolicyManager::loadConfig() {
+    if (deserializeAudioPolicyXmlConfig(getConfig()) != NO_ERROR) {
         ALOGE("could not load audio policy configuration file, setting defaults");
         getConfig().setDefault();
     }
@@ -4097,7 +4046,7 @@
    mInputs.clear();
    mHwModules.clear();
    mHwModulesAll.clear();
-   mSurroundFormats.clear();
+   mManualSurroundFormats.clear();
 }
 
 status_t AudioPolicyManager::initCheck()
@@ -4235,7 +4184,7 @@
                     mpClientInterface->setParameters(output, String8(param));
                     free(param);
                 }
-                updateAudioProfiles(device, output, profile->getAudioProfiles());
+                updateAudioProfiles(devDesc, output, profile->getAudioProfiles());
                 if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkOutputsForDevice() missing param");
                     desc->close();
@@ -4443,7 +4392,7 @@
                     mpClientInterface->setParameters(input, String8(param));
                     free(param);
                 }
-                updateAudioProfiles(device, input, profile->getAudioProfiles());
+                updateAudioProfiles(devDesc, input, profile->getAudioProfiles());
                 if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkInputsForDevice() direct input missing param");
                     desc->close();
@@ -4563,7 +4512,7 @@
 
     // MSD patches may have been released to support a non-MSD direct output. Reset MSD patch if
     // no direct outputs are open.
-    if (getMsdAudioOutDeviceTypes() != AUDIO_DEVICE_NONE) {
+    if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
         bool directOutputOpen = false;
         for (size_t i = 0; i < mOutputs.size(); i++) {
             if (mOutputs[i]->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
@@ -4901,7 +4850,7 @@
 
     // If we are not in call and no client is active on this input, this methods returns
     // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
-    audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
+    audio_source_t source = inputDesc->source();
     if (source == AUDIO_SOURCE_DEFAULT && isInCall()) {
         source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     }
@@ -5240,20 +5189,6 @@
             }
             installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
         }
-
-        // inform all input as well
-        for (size_t i = 0; i < mInputs.size(); i++) {
-            const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
-            if (!is_virtual_input_device(inputDescriptor->mDevice)) {
-                AudioParameter inputCmd = AudioParameter();
-                ALOGV("%s: inform input %d of device:%d", __func__,
-                      inputDescriptor->mIoHandle, device);
-                inputCmd.addInt(String8(AudioParameter::keyRouting),device);
-                mpClientInterface->setParameters(inputDescriptor->mIoHandle,
-                                                 inputCmd.toString(),
-                                                 delayMs);
-            }
-        }
     }
 
     // update stream volumes according to new device
@@ -5796,127 +5731,61 @@
     }
 }
 
-// Modify the list of surround sound formats supported.
-void AudioPolicyManager::filterSurroundFormats(FormatVector *formatsPtr) {
-    FormatVector &formats = *formatsPtr;
-    // TODO Set this based on Config properties.
-    const bool alwaysForceAC3 = true;
+void AudioPolicyManager::modifySurroundFormats(
+        const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr) {
+    std::unordered_set<audio_format_t> enforcedSurround(
+            devDesc->encodedFormats().begin(), devDesc->encodedFormats().end());
+    std::unordered_set<audio_format_t> allSurround;  // A flat set of all known surround formats
+    for (const auto& pair : mConfig.getSurroundFormats()) {
+        allSurround.insert(pair.first);
+        for (const auto& subformat : pair.second) allSurround.insert(subformat);
+    }
 
     audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
     ALOGD("%s: forced use = %d", __FUNCTION__, forceUse);
+    // This is the resulting set of formats depending on the surround mode:
+    //   'all surround' = allSurround
+    //   'enforced surround' = enforcedSurround [may include IEC61937 which isn't a raw surround fmt]
+    //   'non-surround' = not in 'all surround' and not in 'enforced surround'
+    //   'manual surround' = mManualSurroundFormats
+    // AUTO:   formats v 'enforced surround'
+    // ALWAYS: formats v 'all surround' v 'enforced surround'
+    // NEVER:  formats ^ 'non-surround'
+    // MANUAL: formats ^ ('non-surround' v 'manual surround' v (IEC61937 ^ 'enforced surround'))
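+    // Illustrative example (hypothetical values, not from the original change):
+    // with reported formats = {PCM_16_BIT, AC3}, 'all surround' = {AC3, E_AC3, DTS}
+    // and 'enforced surround' = {IEC61937}:
+    //   NEVER  -> {PCM_16_BIT}
+    //   AUTO   -> {PCM_16_BIT, AC3, IEC61937}
+    //   ALWAYS -> {PCM_16_BIT, AC3, E_AC3, DTS, IEC61937}
+    //   MANUAL (with 'manual surround' = {DTS}) -> {PCM_16_BIT, DTS, IEC61937}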
 
-    // If MANUAL, keep the supported surround sound formats as current enabled ones.
+    std::unordered_set<audio_format_t> formatSet;
+    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL
+            || forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+        // formatSet is (formats ^ 'non-surround')
+        for (auto formatIter = formatsPtr->begin(); formatIter != formatsPtr->end(); ++formatIter) {
+            if (allSurround.count(*formatIter) == 0 && enforcedSurround.count(*formatIter) == 0) {
+                formatSet.insert(*formatIter);
+            }
+        }
+    } else {
+        formatSet.insert(formatsPtr->begin(), formatsPtr->end());
+    }
+    formatsPtr->clear();  // Re-filled from the formatSet at the end.
+
     if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
-        formats.clear();
-        for (auto it = mSurroundFormats.begin(); it != mSurroundFormats.end(); it++) {
-            formats.add(*it);
+        formatSet.insert(mManualSurroundFormats.begin(), mManualSurroundFormats.end());
+        // Enable IEC61937 when in MANUAL mode if it's enforced for this device.
+        if (enforcedSurround.count(AUDIO_FORMAT_IEC61937) != 0) {
+            formatSet.insert(AUDIO_FORMAT_IEC61937);
         }
-        // Always enable IEC61937 when in MANUAL mode.
-        formats.add(AUDIO_FORMAT_IEC61937);
-    } else { // NEVER, AUTO or ALWAYS
-        // Analyze original support for various formats.
-        bool supportsAC3 = false;
-        bool supportsOtherSurround = false;
-        bool supportsIEC61937 = false;
-        mSurroundFormats.clear();
-        for (ssize_t formatIndex = 0; formatIndex < (ssize_t)formats.size(); formatIndex++) {
-            audio_format_t format = formats[formatIndex];
-            switch (format) {
-                case AUDIO_FORMAT_AC3:
-                    supportsAC3 = true;
-                    break;
-                case AUDIO_FORMAT_E_AC3:
-                case AUDIO_FORMAT_DTS:
-                case AUDIO_FORMAT_DTS_HD:
-                    // If ALWAYS, remove all other surround formats here
-                    // since we will add them later.
-                    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
-                        formats.removeAt(formatIndex);
-                        formatIndex--;
-                    }
-                    supportsOtherSurround = true;
-                    break;
-                case AUDIO_FORMAT_IEC61937:
-                    supportsIEC61937 = true;
-                    break;
-                default:
-                    break;
-            }
+    } else if (forceUse != AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) { // AUTO or ALWAYS
+        if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+            formatSet.insert(allSurround.begin(), allSurround.end());
         }
-
-        // Modify formats based on surround preferences.
-        // If NEVER, remove support for surround formats.
-        if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
-            if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
-                // Remove surround sound related formats.
-                for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
-                    audio_format_t format = formats[formatIndex];
-                    switch(format) {
-                        case AUDIO_FORMAT_AC3:
-                        case AUDIO_FORMAT_E_AC3:
-                        case AUDIO_FORMAT_DTS:
-                        case AUDIO_FORMAT_DTS_HD:
-                        case AUDIO_FORMAT_IEC61937:
-                            formats.removeAt(formatIndex);
-                            break;
-                        default:
-                            formatIndex++; // keep it
-                            break;
-                    }
-                }
-                supportsAC3 = false;
-                supportsOtherSurround = false;
-                supportsIEC61937 = false;
-            }
-        } else { // AUTO or ALWAYS
-            // Most TVs support AC3 even if they do not report it in the EDID.
-            if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
-                    && !supportsAC3) {
-                formats.add(AUDIO_FORMAT_AC3);
-                supportsAC3 = true;
-            }
-
-            // If ALWAYS, add support for raw surround formats if all are missing.
-            // This assumes that if any of these formats are reported by the HAL
-            // then the report is valid and should not be modified.
-            if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
-                formats.add(AUDIO_FORMAT_E_AC3);
-                formats.add(AUDIO_FORMAT_DTS);
-                formats.add(AUDIO_FORMAT_DTS_HD);
-                supportsOtherSurround = true;
-            }
-
-            // Add support for IEC61937 if any raw surround supported.
-            // The HAL could do this but add it here, just in case.
-            if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
-                formats.add(AUDIO_FORMAT_IEC61937);
-                supportsIEC61937 = true;
-            }
-
-            // Add reported surround sound formats to enabled surround formats.
-            for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
-                audio_format_t format = formats[formatIndex];
-                switch(format) {
-                    case AUDIO_FORMAT_AC3:
-                    case AUDIO_FORMAT_E_AC3:
-                    case AUDIO_FORMAT_DTS:
-                    case AUDIO_FORMAT_DTS_HD:
-                    case AUDIO_FORMAT_AAC_LC:
-                    case AUDIO_FORMAT_DOLBY_TRUEHD:
-                    case AUDIO_FORMAT_E_AC3_JOC:
-                        mSurroundFormats.insert(format);
-                        break;
-                    default:
-                        break;
-                }
-            }
-        }
+        formatSet.insert(enforcedSurround.begin(), enforcedSurround.end());
+    }
+    for (const auto& format : formatSet) {
+        formatsPtr->push(format);
     }
 }
 
-// Modify the list of channel masks supported.
-void AudioPolicyManager::filterSurroundChannelMasks(ChannelsVector *channelMasksPtr) {
+void AudioPolicyManager::modifySurroundChannelMasks(ChannelsVector *channelMasksPtr) {
     ChannelsVector &channelMasks = *channelMasksPtr;
     audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
@@ -5946,16 +5815,17 @@
         // If not then add 5.1 support.
         if (!supports5dot1) {
             channelMasks.add(AUDIO_CHANNEL_OUT_5POINT1);
-            ALOGI("%s: force ALWAYS, so adding channelMask for 5.1 surround", __FUNCTION__);
+            ALOGI("%s: force MANUAL or ALWAYS, so adding channelMask for 5.1 surround", __func__);
         }
     }
 }
 
-void AudioPolicyManager::updateAudioProfiles(audio_devices_t device,
+void AudioPolicyManager::updateAudioProfiles(const sp<DeviceDescriptor>& devDesc,
                                              audio_io_handle_t ioHandle,
                                              AudioProfileVector &profiles)
 {
     String8 reply;
+    audio_devices_t device = devDesc->type();
 
     // Format MUST be checked first to update the list of AudioProfile
     if (profiles.hasDynamicFormat()) {
@@ -5969,8 +5839,9 @@
             return;
         }
         FormatVector formats = formatsFromString(reply.string());
-        if (device == AUDIO_DEVICE_OUT_HDMI) {
-            filterSurroundFormats(&formats);
+        if (device == AUDIO_DEVICE_OUT_HDMI
+                || isDeviceOfModule(devDesc, AUDIO_HARDWARE_MODULE_ID_MSD)) {
+            modifySurroundFormats(devDesc, &formats);
         }
         profiles.setFormats(formats);
     }
@@ -6002,8 +5873,9 @@
             if (repliedParameters.get(
                     String8(AudioParameter::keyStreamSupportedChannels), reply) == NO_ERROR) {
                 channelMasks = channelMasksFromString(reply.string());
-                if (device == AUDIO_DEVICE_OUT_HDMI) {
-                    filterSurroundChannelMasks(&channelMasks);
+                if (device == AUDIO_DEVICE_OUT_HDMI
+                        || isDeviceOfModule(devDesc, AUDIO_HARDWARE_MODULE_ID_MSD)) {
+                    modifySurroundChannelMasks(&channelMasks);
                 }
             }
         }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 0436b1d..d0708b8 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -134,9 +134,7 @@
                                          audio_port_handle_t *portId);
 
         // indicates to the audio policy manager that the input starts being used.
-        virtual status_t startInput(audio_port_handle_t portId,
-                                    bool silenced,
-                                    concurrency_type__mask_t *concurrency);
+        virtual status_t startInput(audio_port_handle_t portId);
 
         // indicates to the audio policy manager that the input stops being used.
         virtual status_t stopInput(audio_port_handle_t portId);
@@ -186,12 +184,17 @@
 
         virtual bool isSourceActive(audio_source_t source) const;
 
-        void dump(String8 *dst) const; // helper for dump(int fd)
+        // helpers for dump(int fd)
+        void dumpManualSurroundFormats(String8 *dst) const;
+        void dump(String8 *dst) const;
 
         status_t dump(int fd) override;
 
         virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
 
+        virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                             const audio_attributes_t& attributes);
+
         virtual status_t listAudioPorts(audio_port_role_t role,
                                         audio_port_type_t type,
                                         unsigned int *num_ports,
@@ -244,6 +247,8 @@
 
         virtual void setAppState(uid_t uid, app_state_t state);
 
+        virtual bool isHapticPlaybackSupported();
+
 protected:
         // A constructor that allows more fine-grained control over initialization process,
         // used in automatic tests.
@@ -470,8 +475,10 @@
                                             uint32_t delayMs);
 
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
-                                       audio_output_flags_t flags,
-                                       audio_format_t format);
+                                       audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                       audio_format_t format = AUDIO_FORMAT_INVALID,
+                                       audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE,
+                                       uint32_t samplingRate = 0);
         // samplingRate, format, channelMask are in/out and so may be modified
         sp<IOProfile> getInputProfile(audio_devices_t device,
                                       const String8& address,
@@ -479,11 +486,12 @@
                                       audio_format_t& format,
                                       audio_channel_mask_t& channelMask,
                                       audio_input_flags_t flags);
-        sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
-                                                       uint32_t samplingRate,
-                                                       audio_format_t format,
-                                                       audio_channel_mask_t channelMask,
-                                                       audio_output_flags_t flags);
+        sp<IOProfile> getProfileForOutput(audio_devices_t device,
+                                          uint32_t samplingRate,
+                                          audio_format_t format,
+                                          audio_channel_mask_t channelMask,
+                                          audio_output_flags_t flags,
+                                          bool directOnly);
 
         audio_io_handle_t selectOutputForMusicEffects();
 
@@ -516,6 +524,9 @@
         sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
         sp<DeviceDescriptor> findDevice(
                 const DeviceVector& devices, audio_devices_t device) const;
+        audio_devices_t getModuleDeviceTypes(
+                const DeviceVector& devices, const char *moduleId) const;
+        bool isDeviceOfModule(const sp<DeviceDescriptor>& devDesc, const char *moduleId) const;
 
         status_t startSource(const sp<SwAudioOutputDescriptor>& outputDesc,
                              const sp<TrackClientDescriptor>& client,
@@ -539,8 +550,6 @@
 
         void clearAudioSources(uid_t uid);
 
-        static bool isConcurrentSource(audio_source_t source);
-
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
 
@@ -608,16 +617,16 @@
         // Audio Policy Engine Interface.
         AudioPolicyManagerInterface *mEngine;
 
-        // Surround formats that are enabled.
-        std::unordered_set<audio_format_t> mSurroundFormats;
+        // Surround formats that are enabled manually. Taken into account when
+        // "encoded surround" is forced into "manual" mode.
+        std::unordered_set<audio_format_t> mManualSurroundFormats;
 private:
         // Add or remove AC3 DTS encodings based on user preferences.
-        void filterSurroundFormats(FormatVector *formatsPtr);
-        void filterSurroundChannelMasks(ChannelsVector *channelMasksPtr);
+        void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
+        void modifySurroundChannelMasks(ChannelsVector *channelMasksPtr);
 
         // Support for Multi-Stream Decoder (MSD) module
         sp<DeviceDescriptor> getMsdAudioInDevice() const;
-        audio_devices_t getMsdAudioOutDeviceTypes() const;
         const AudioPatchCollection getMsdPatches() const;
         status_t getBestMsdAudioProfileFor(audio_devices_t outputDevice,
                                            bool hwAvSync,
@@ -627,7 +636,7 @@
         status_t setMsdPatch(audio_devices_t outputDevice = AUDIO_DEVICE_NONE);
 
         // If any, resolve any "dynamic" fields of an Audio Profiles collection
-        void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
+        void updateAudioProfiles(const sp<DeviceDescriptor>& devDesc, audio_io_handle_t ioHandle,
                 AudioProfileVector &profiles);
 
         // Notify the policy client of any change of device state with AUDIO_IO_HANDLE_NONE,
@@ -704,10 +713,6 @@
                 int delayMs,
                 uid_t uid,
                 sp<AudioPatch> *patchDescPtr);
-
-        bool soundTriggerSupportsConcurrentCapture();
-        bool mSoundTriggerSupportsConcurrentCapture;
-        bool mHasComputedSoundTriggerSupportsConcurrentCapture;
 };
 
 };
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 59c8f10..439764b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -454,23 +454,6 @@
     return rawbuffer;
 }
 
-static std::string audioConcurrencyString(
-        AudioPolicyInterface::concurrency_type__mask_t concurrency)
-{
-    char buffer[64]; // oversized
-    if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL) {
-        snprintf(buffer, sizeof(buffer), "%s%s%s%s",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL)? ",call":"",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE)? ",capture":"",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_HOTWORD)? ",hotword":"",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_PREEMPT)? ",preempt":"");
-    } else {
-        snprintf(buffer, sizeof(buffer), ",none");
-    }
-
-    return &buffer[1];
-}
-
 std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
     std::string typeStr;
     struct audio_port port = {};
@@ -482,7 +465,7 @@
     return typeStr;
 }
 
-status_t AudioPolicyService::startInput(audio_port_handle_t portId, bool *silenced)
+status_t AudioPolicyService::startInput(audio_port_handle_t portId)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -505,17 +488,16 @@
         return PERMISSION_DENIED;
     }
 
-    // If UID inactive it records silence until becoming active
-    *silenced = !mUidPolicy->isUidActive(client->uid) && !client->isVirtualDevice;
-
     Mutex::Autolock _l(mLock);
-    AudioPolicyInterface::concurrency_type__mask_t concurrency =
-            AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
+
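+    // Mark the client active (and record its start time) before calling into the
+    // policy manager so that updateUidStates_l() sees it; this is rolled back
+    // further down if startInput() fails.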
+    client->active = true;
+    client->startTimeNs = systemTime();
+    updateUidStates_l();
 
     status_t status;
     {
         AutoCallerClear acc;
-        status = mAudioPolicyManager->startInput(portId, *silenced, &concurrency);
+        status = mAudioPolicyManager->startInput(portId);
 
     }
 
@@ -524,7 +506,6 @@
 
         static constexpr char kAudioPolicy[] = "audiopolicy";
 
-        static constexpr char kAudioPolicyReason[] = "android.media.audiopolicy.reason";
         static constexpr char kAudioPolicyStatus[] = "android.media.audiopolicy.status";
         static constexpr char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
         static constexpr char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
@@ -541,7 +522,6 @@
         MediaAnalyticsItem *item = new MediaAnalyticsItem(kAudioPolicy);
         if (item != NULL) {
 
-            item->setCString(kAudioPolicyReason, audioConcurrencyString(concurrency).c_str());
             item->setInt32(kAudioPolicyStatus, status);
 
             item->setCString(kAudioPolicyRqstSrc,
@@ -556,54 +536,35 @@
             item->setCString(
                     kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
 
-            // figure out who is active
-            // NB: might the other party have given up the microphone since then? how sure.
-            // perhaps could have given up on it.
-            // we hold mLock, so perhaps we're safe for this looping
-            if (concurrency != AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE) {
-                int count = mAudioRecordClients.size();
-                for (int i = 0; i<count ; i++) {
-                    if (portId == mAudioRecordClients.keyAt(i)) {
-                        continue;
+            int count = mAudioRecordClients.size();
+            for (int i = 0; i < count; i++) {
+                if (portId == mAudioRecordClients.keyAt(i)) {
+                    continue;
+                }
+                sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
+                if (other->active) {
+                    // keeps the last of the clients marked active
+                    item->setCString(kAudioPolicyActiveSrc,
+                                     audioSourceString(other->attributes.source).c_str());
+                    item->setInt32(kAudioPolicyActiveSession, other->session);
+                    if (other->opPackageName.size() != 0) {
+                        item->setCString(kAudioPolicyActivePkg,
+                             std::string(String8(other->opPackageName).string()).c_str());
+                    } else {
+                        item->setCString(kAudioPolicyRqstPkg,
+                                         std::to_string(other->uid).c_str());
                     }
-                    sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
-                    if (other->active) {
-                        // keeps the last of the clients marked active
-                        item->setCString(kAudioPolicyActiveSrc,
-                                         audioSourceString(other->attributes.source).c_str());
-                        item->setInt32(kAudioPolicyActiveSession, other->session);
-                        if (other->opPackageName.size() != 0) {
-                            item->setCString(kAudioPolicyActivePkg,
-                                 std::string(String8(other->opPackageName).string()).c_str());
-                        } else {
-                            item->setCString(kAudioPolicyRqstPkg,
-                                             std::to_string(other->uid).c_str());
-                        }
-                        item->setCString(kAudioPolicyActiveDevice,
-                                         getDeviceTypeStrForPortId(other->deviceId).c_str());
-                    }
+                    item->setCString(kAudioPolicyActiveDevice,
+                                     getDeviceTypeStrForPortId(other->deviceId).c_str());
                 }
             }
             item->selfrecord();
             delete item;
             item = NULL;
         }
-    }
-
-    if (status == NO_ERROR) {
-        LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
-                            "startInput(): invalid concurrency type %d", (int)concurrency);
-
-        // enforce permission (if any) required for each type of concurrency
-        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
-            //TODO: check incall capture permission
-        }
-        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
-            //TODO: check concurrent capture permission
-        }
-
-        client->active = true;
-    } else {
+        client->active = false;
+        client->startTimeNs = 0;
+        updateUidStates_l();
         finishRecording(client->opPackageName, client->uid);
     }
 
@@ -615,6 +576,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+
     Mutex::Autolock _l(mLock);
 
     ssize_t index = mAudioRecordClients.indexOfKey(portId);
@@ -624,6 +586,9 @@
     sp<AudioRecordClient> client = mAudioRecordClients.valueAt(index);
 
     client->active = false;
+    client->startTimeNs = 0;
+
+    updateUidStates_l();
 
     // finish the recording app op
     finishRecording(client->opPackageName, client->uid);
@@ -646,6 +611,14 @@
             return;
         }
         client = mAudioRecordClients.valueAt(index);
+
+        if (client->active) {
+            ALOGW("%s releasing active client portId %d", __FUNCTION__, portId);
+            client->active = false;
+            client->startTimeNs = 0;
+            updateUidStates_l();
+        }
+
         mAudioRecordClients.removeItem(portId);
     }
     if (client == 0) {
@@ -936,6 +909,17 @@
     return mAudioPolicyManager->isOffloadSupported(info);
 }
 
+bool AudioPolicyService::isDirectOutputSupported(const audio_config_base_t& config,
+                                                 const audio_attributes_t& attributes) {
+    if (mAudioPolicyManager == NULL) {
+        ALOGV("mAudioPolicyManager == NULL");
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->isDirectOutputSupported(config, attributes);
+}
+
+
 status_t AudioPolicyService::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
@@ -1149,4 +1133,15 @@
     return NO_ERROR;
 }
 
+bool AudioPolicyService::isHapticPlaybackSupported()
+{
+    if (mAudioPolicyManager == NULL) {
+        ALOGW("%s, mAudioPolicyManager == NULL", __func__);
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    return mAudioPolicyManager->isHapticPlaybackSupported();
+}
+
 } // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 018d9e3..ee5d6ff 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -340,14 +340,146 @@
     return NO_ERROR;
 }
 
-void AudioPolicyService::setAppState(uid_t uid, app_state_t state)
+void AudioPolicyService::updateUidStates()
 {
-    {
-        Mutex::Autolock _l(mLock);
-        if (mAudioPolicyManager) {
-            AutoCallerClear acc;
-            mAudioPolicyManager->setAppState(uid, state);
+    Mutex::Autolock _l(mLock);
+    updateUidStates_l();
+}
+
+void AudioPolicyService::updateUidStates_l()
+{
+//    Go over all active clients and allow capture (does not force silence) in the
+//    following cases:
+//    The client is the assistant
+//        AND an accessibility service is on TOP
+//               AND the source is VOICE_RECOGNITION or HOTWORD
+//        OR uses VOICE_RECOGNITION AND is on TOP OR latest started
+//               OR uses HOTWORD
+//            AND there is no privacy sensitive active capture
+//    OR The client is an accessibility service
+//        AND is on TOP OR latest started
+//        AND the source is VOICE_RECOGNITION or HOTWORD
+//    OR Any other client
+//        AND The assistant is not on TOP
+//        AND is on TOP OR latest started
+//        AND there is no privacy sensitive active capture
+//TODO: manage pre-processing effects according to use case priority
+
+    sp<AudioRecordClient> topActive;
+    sp<AudioRecordClient> latestActive;
+    sp<AudioRecordClient> latestSensitiveActive;
+    nsecs_t topStartNs = 0;
+    nsecs_t latestStartNs = 0;
+    nsecs_t latestSensitiveStartNs = 0;
+    bool isA11yOnTop = mUidPolicy->isA11yOnTop();
+    bool isAssistantOnTop = false;
+    bool isSensitiveActive = false;
+
+    for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
+        sp<AudioRecordClient> current = mAudioRecordClients[i];
+        if (!current->active) continue;
+        if (isPrivacySensitive(current->attributes.source)) {
+            if (current->startTimeNs > latestSensitiveStartNs) {
+                latestSensitiveActive = current;
+                latestSensitiveStartNs = current->startTimeNs;
+            }
+            isSensitiveActive = true;
         }
+        if (mUidPolicy->getUidState(current->uid) == ActivityManager::PROCESS_STATE_TOP) {
+            if (current->startTimeNs > topStartNs) {
+                topActive = current;
+                topStartNs = current->startTimeNs;
+            }
+            if (mUidPolicy->isAssistantUid(current->uid)) {
+                isAssistantOnTop = true;
+            }
+        }
+        if (current->startTimeNs > latestStartNs) {
+            latestActive = current;
+            latestStartNs = current->startTimeNs;
+        }
+    }
+
+    if (topActive == nullptr && latestActive == nullptr) {
+        return;
+    }
+
+    if (topActive != nullptr) {
+        latestActive = nullptr;
+    }
+
+    for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
+        sp<AudioRecordClient> current = mAudioRecordClients[i];
+        if (!current->active) continue;
+
+        audio_source_t source = current->attributes.source;
+        bool isOnTop = current == topActive;
+        bool isLatest = current == latestActive;
+        bool isLatestSensitive = current == latestSensitiveActive;
+        bool forceIdle = true;
+        if (mUidPolicy->isAssistantUid(current->uid)) {
+            if (isA11yOnTop) {
+                if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
+                    forceIdle = false;
+                }
+            } else {
+                if ((((isOnTop || isLatest) && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+                     source == AUDIO_SOURCE_HOTWORD) && !isSensitiveActive) {
+                    forceIdle = false;
+                }
+            }
+        } else if (mUidPolicy->isA11yUid(current->uid)) {
+            if ((isOnTop || isLatest) &&
+                (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
+                forceIdle = false;
+            }
+        } else {
+            if (!isAssistantOnTop && (isOnTop || isLatest) &&
+                (!isSensitiveActive || isLatestSensitive)) {
+                forceIdle = false;
+            }
+        }
+        setAppState_l(current->uid,
+                      forceIdle ? APP_STATE_IDLE :
+                                  apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
+    }
+}
+
+/* static */
+app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
+    switch (amState) {
+    case ActivityManager::PROCESS_STATE_UNKNOWN:
+        return APP_STATE_IDLE;
+    case ActivityManager::PROCESS_STATE_TOP:
+        return APP_STATE_TOP;
+    default:
+        break;
+    }
+    return APP_STATE_FOREGROUND;
+}
+
+/* static */
+bool AudioPolicyService::isPrivacySensitive(audio_source_t source)
+{
+    switch (source) {
+        case AUDIO_SOURCE_VOICE_UPLINK:
+        case AUDIO_SOURCE_VOICE_DOWNLINK:
+        case AUDIO_SOURCE_VOICE_CALL:
+        case AUDIO_SOURCE_CAMCORDER:
+        case AUDIO_SOURCE_VOICE_COMMUNICATION:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+void AudioPolicyService::setAppState_l(uid_t uid, app_state_t state)
+{
+    AutoCallerClear acc;
+
+    if (mAudioPolicyManager) {
+        mAudioPolicyManager->setAppState(uid, state);
     }
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af) {
@@ -511,7 +643,8 @@
     ActivityManager am;
     am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
             | ActivityManager::UID_OBSERVER_IDLE
-            | ActivityManager::UID_OBSERVER_ACTIVE,
+            | ActivityManager::UID_OBSERVER_ACTIVE
+            | ActivityManager::UID_OBSERVER_PROCSTATE,
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("audioserver"));
     status_t res = am.linkToDeath(this);
@@ -520,6 +653,7 @@
         mObserverRegistered = true;
     } else {
         ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
+
         am.unregisterUidObserver(this);
     }
 }
@@ -538,8 +672,7 @@
     mObserverRegistered = false;
 }
 
-bool AudioPolicyService::UidPolicy::isUidActive(uid_t uid) {
-    if (isServiceUid(uid)) return true;
+void AudioPolicyService::UidPolicy::checkRegistered() {
     bool needToReregister = false;
     {
         Mutex::Autolock _l(mLock);
@@ -549,94 +682,176 @@
         // Looks like ActivityManager has died previously, attempt to re-register.
         registerSelf();
     }
+}
+
+bool AudioPolicyService::UidPolicy::isUidActive(uid_t uid) {
+    if (isServiceUid(uid)) return true;
+    checkRegistered();
     {
         Mutex::Autolock _l(mLock);
         auto overrideIter = mOverrideUids.find(uid);
         if (overrideIter != mOverrideUids.end()) {
-            return overrideIter->second;
+            return overrideIter->second.first;
         }
         // In an absense of the ActivityManager, assume everything to be active.
         if (!mObserverRegistered) return true;
         auto cacheIter = mCachedUids.find(uid);
         if (cacheIter != mCachedUids.end()) {
-            return cacheIter->second;
+            return cacheIter->second.first;
         }
     }
     ActivityManager am;
     bool active = am.isUidActive(uid, String16("audioserver"));
     {
         Mutex::Autolock _l(mLock);
-        mCachedUids.insert(std::pair<uid_t, bool>(uid, active));
+        mCachedUids.insert(std::pair<uid_t,
+                           std::pair<bool, int>>(uid, std::pair<bool, int>(active,
+                                                      ActivityManager::PROCESS_STATE_UNKNOWN)));
     }
     return active;
 }
 
+int AudioPolicyService::UidPolicy::getUidState(uid_t uid) {
+    if (isServiceUid(uid)) {
+        return ActivityManager::PROCESS_STATE_TOP;
+    }
+    checkRegistered();
+    {
+        Mutex::Autolock _l(mLock);
+        auto overrideIter = mOverrideUids.find(uid);
+        if (overrideIter != mOverrideUids.end()) {
+            if (overrideIter->second.first) {
+                if (overrideIter->second.second != ActivityManager::PROCESS_STATE_UNKNOWN) {
+                    return overrideIter->second.second;
+                } else {
+                    auto cacheIter = mCachedUids.find(uid);
+                    if (cacheIter != mCachedUids.end()) {
+                        return cacheIter->second.second;
+                    }
+                }
+            }
+            return ActivityManager::PROCESS_STATE_UNKNOWN;
+        }
+        // In the absence of the ActivityManager, assume everything to be active.
+        if (!mObserverRegistered) {
+            return ActivityManager::PROCESS_STATE_TOP;
+        }
+        auto cacheIter = mCachedUids.find(uid);
+        if (cacheIter != mCachedUids.end()) {
+            if (cacheIter->second.first) {
+                return cacheIter->second.second;
+            } else {
+                return ActivityManager::PROCESS_STATE_UNKNOWN;
+            }
+        }
+    }
+    ActivityManager am;
+    bool active = am.isUidActive(uid, String16("audioserver"));
+    int state = ActivityManager::PROCESS_STATE_UNKNOWN;
+    if (active) {
+        state = am.getUidProcessState(uid, String16("audioserver"));
+    }
+    {
+        Mutex::Autolock _l(mLock);
+        mCachedUids.insert(std::pair<uid_t,
+                           std::pair<bool, int>>(uid, std::pair<bool, int>(active, state)));
+    }
+
+    return state;
+}
+
 void AudioPolicyService::UidPolicy::onUidActive(uid_t uid) {
-    updateUidCache(uid, true, true);
+    updateUid(&mCachedUids, uid, true, ActivityManager::PROCESS_STATE_UNKNOWN, true);
 }
 
 void AudioPolicyService::UidPolicy::onUidGone(uid_t uid, __unused bool disabled) {
-    updateUidCache(uid, false, false);
+    updateUid(&mCachedUids, uid, false, ActivityManager::PROCESS_STATE_UNKNOWN, false);
 }
 
 void AudioPolicyService::UidPolicy::onUidIdle(uid_t uid, __unused bool disabled) {
-    updateUidCache(uid, false, true);
+    updateUid(&mCachedUids, uid, false, ActivityManager::PROCESS_STATE_UNKNOWN, true);
 }
 
-void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
-    sp<AudioPolicyService> service = mService.promote();
-    if (service != nullptr) {
-        service->setAppState(uid, active ?
-                              APP_STATE_FOREGROUND :
-                              APP_STATE_IDLE);
+void AudioPolicyService::UidPolicy::onUidStateChanged(uid_t uid,
+                                                      int32_t procState,
+                                                      int64_t procStateSeq __unused) {
+    if (procState != ActivityManager::PROCESS_STATE_UNKNOWN) {
+        updateUid(&mCachedUids, uid, true, procState, true);
     }
 }
 
 void AudioPolicyService::UidPolicy::updateOverrideUid(uid_t uid, bool active, bool insert) {
-    if (isServiceUid(uid)) return;
-    bool wasOverridden = false, wasActive = false;
-    {
-        Mutex::Autolock _l(mLock);
-        updateUidLocked(&mOverrideUids, uid, active, insert, &wasOverridden, &wasActive);
-    }
-    if (!wasOverridden && insert) {
-        notifyService(uid, active);  // Started to override.
-    } else if (wasOverridden && !insert) {
-        notifyService(uid, isUidActive(uid));  // Override ceased, notify with ground truth.
-    } else if (wasActive != active) {
-        notifyService(uid, active);  // Override updated.
+    updateUid(&mOverrideUids, uid, active, ActivityManager::PROCESS_STATE_UNKNOWN, insert);
+}
+
+void AudioPolicyService::UidPolicy::notifyService() {
+    sp<AudioPolicyService> service = mService.promote();
+    if (service != nullptr) {
+        service->updateUidStates();
     }
 }
 
-void AudioPolicyService::UidPolicy::updateUidCache(uid_t uid, bool active, bool insert) {
-    if (isServiceUid(uid)) return;
-    bool wasActive = false;
+void AudioPolicyService::UidPolicy::updateUid(std::unordered_map<uid_t,
+                                              std::pair<bool, int>> *uids,
+                                              uid_t uid,
+                                              bool active,
+                                              int state,
+                                              bool insert) {
+    if (isServiceUid(uid)) {
+        return;
+    }
+    bool wasActive = isUidActive(uid);
+    int previousState = getUidState(uid);
     {
         Mutex::Autolock _l(mLock);
-        updateUidLocked(&mCachedUids, uid, active, insert, nullptr, &wasActive);
-        // Do not notify service if currently overridden.
-        if (mOverrideUids.find(uid) != mOverrideUids.end()) return;
+        updateUidLocked(uids, uid, active, state, insert);
     }
-    bool nowActive = active && insert;
-    if (wasActive != nowActive) notifyService(uid, nowActive);
+    if (wasActive != isUidActive(uid) || state != previousState) {
+        notifyService();
+    }
 }
 
-void AudioPolicyService::UidPolicy::updateUidLocked(std::unordered_map<uid_t, bool> *uids,
-        uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive) {
+void AudioPolicyService::UidPolicy::updateUidLocked(std::unordered_map<uid_t,
+                                                    std::pair<bool, int>> *uids,
+                                                    uid_t uid,
+                                                    bool active,
+                                                    int state,
+                                                    bool insert) {
     auto it = uids->find(uid);
     if (it != uids->end()) {
-        if (wasThere != nullptr) *wasThere = true;
-        if (wasActive != nullptr) *wasActive = it->second;
         if (insert) {
-            it->second = active;
+            if (state == ActivityManager::PROCESS_STATE_UNKNOWN) {
+                it->second.first = active;
+            }
+            if (it->second.first) {
+                it->second.second = state;
+            } else {
+                it->second.second = ActivityManager::PROCESS_STATE_UNKNOWN;
+            }
         } else {
             uids->erase(it);
         }
-    } else if (insert) {
-        uids->insert(std::pair<uid_t, bool>(uid, active));
+    } else if (insert && (state == ActivityManager::PROCESS_STATE_UNKNOWN)) {
+        uids->insert(std::pair<uid_t, std::pair<bool, int>>(uid,
+                                      std::pair<bool, int>(active, state)));
     }
 }
 
+bool AudioPolicyService::UidPolicy::isA11yOnTop() {
+    for (const auto &uid : mCachedUids) {
+        std::vector<uid_t>::iterator it = find(mA11yUids.begin(), mA11yUids.end(), uid.first);
+        if (it == mA11yUids.end()) {
+            continue;
+        }
+        if (uid.second.second == ActivityManager::PROCESS_STATE_TOP ||
+            uid.second.second == ActivityManager::PROCESS_STATE_FOREGROUND_SERVICE ||
+            uid.second.second == ActivityManager::PROCESS_STATE_BOUND_FOREGROUND_SERVICE) {
+            return true;
+        }
+    }
+    return false;
+}
+
 bool AudioPolicyService::UidPolicy::isA11yUid(uid_t uid)
 {
     std::vector<uid_t>::iterator it = find(mA11yUids.begin(), mA11yUids.end(), uid);
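The rewritten updateUidStates_l() above begins by scanning the active record clients to pick three candidates: the most recently started client whose UID is TOP, the most recently started active client overall, and the most recently started privacy-sensitive client; a TOP client then takes precedence over the plain "latest started" one. A condensed standalone model of just that selection step, with Client and Selection as simplified stand-ins for the service types:

#include <cstdint>
#include <vector>

// Simplified stand-in for AudioRecordClient: only the fields the selection uses.
struct Client {
    bool active;
    bool isTop;          // UID is in PROCESS_STATE_TOP (via UidPolicy in the real code)
    bool isSensitive;    // source is one of the privacy-sensitive ones
    int64_t startTimeNs;
};

struct Selection {
    const Client* topActive = nullptr;       // latest-started client whose UID is TOP
    const Client* latestActive = nullptr;    // latest-started active client overall
    const Client* latestSensitive = nullptr;
    bool sensitiveActive = false;
};

Selection pickCandidates(const std::vector<Client>& clients) {
    Selection s;
    int64_t topNs = 0, latestNs = 0, sensitiveNs = 0;
    for (const Client& c : clients) {
        if (!c.active) continue;
        if (c.isSensitive) {
            s.sensitiveActive = true;
            if (c.startTimeNs > sensitiveNs) { s.latestSensitive = &c; sensitiveNs = c.startTimeNs; }
        }
        if (c.isTop && c.startTimeNs > topNs) { s.topActive = &c; topNs = c.startTimeNs; }
        if (c.startTimeNs > latestNs) { s.latestActive = &c; latestNs = c.startTimeNs; }
    }
    if (s.topActive != nullptr) {
        s.latestActive = nullptr;   // a TOP client takes precedence over "latest started"
    }
    return s;
}

int main() {
    std::vector<Client> clients = {
        {true, false, false, 100},   // background client, started first
        {true, true,  false, 200},   // TOP client, started later: wins
    };
    Selection s = pickCandidates(clients);
    return (s.topActive != nullptr && s.latestActive == nullptr) ? 0 : 1;
}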
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index ec32511..23c3daa 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -94,8 +94,7 @@
                                      audio_input_flags_t flags,
                                      audio_port_handle_t *selectedDeviceId = NULL,
                                      audio_port_handle_t *portId = NULL);
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool *silenced);
+    virtual status_t startInput(audio_port_handle_t portId);
     virtual status_t stopInput(audio_port_handle_t portId);
     virtual void releaseInput(audio_port_handle_t portId);
     virtual status_t initStreamVolume(audio_stream_type_t stream,
@@ -168,6 +167,8 @@
                                      int delayMs = 0);
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes);
 
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
@@ -217,6 +218,8 @@
     virtual status_t setAssistantUid(uid_t uid);
     virtual status_t setA11yServicesUids(const std::vector<uid_t>& uids);
 
+    virtual bool     isHapticPlaybackSupported();
+
             status_t doStopOutput(audio_port_handle_t portId);
             void doReleaseOutput(audio_port_handle_t portId);
 
@@ -253,7 +256,7 @@
     virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
 
     // Sets whether the given UID records only silence
-    virtual void setAppState(uid_t uid, app_state_t state);
+    virtual void setAppState_l(uid_t uid, app_state_t state);
 
     // Overrides the UID state as if it is idle
     status_t handleSetUidState(Vector<String16>& args, int err);
@@ -271,6 +274,13 @@
 
     status_t getAudioPolicyEffects(sp<AudioPolicyEffects>& audioPolicyEffects);
 
+    app_state_t apmStatFromAmState(int amState);
+
+    void updateUidStates();
+    void updateUidStates_l();
+
+    static bool isPrivacySensitive(audio_source_t source);
+
     // If recording we need to make sure the UID is allowed to do that. If the UID is idle
     // then it cannot record and gets buffers with zeros - silence. As soon as the UID
     // transitions to an active state we will start reporting buffers with data. This approach
@@ -289,31 +299,37 @@
         void binderDied(const wp<IBinder> &who) override;
 
         bool isUidActive(uid_t uid);
+        int getUidState(uid_t uid);
         void setAssistantUid(uid_t uid) { mAssistantUid = uid; }
         bool isAssistantUid(uid_t uid) { return uid == mAssistantUid; }
         void setA11yUids(const std::vector<uid_t>& uids) { mA11yUids.clear(); mA11yUids = uids; }
         bool isA11yUid(uid_t uid);
+        bool isA11yOnTop();
 
         // BnUidObserver implementation
         void onUidActive(uid_t uid) override;
         void onUidGone(uid_t uid, bool disabled) override;
         void onUidIdle(uid_t uid, bool disabled) override;
+        void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq);
 
         void addOverrideUid(uid_t uid, bool active) { updateOverrideUid(uid, active, true); }
         void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
 
-    private:
-        void notifyService(uid_t uid, bool active);
+        void updateUid(std::unordered_map<uid_t, std::pair<bool, int>> *uids,
+                       uid_t uid, bool active, int state, bool insert);
+
+     private:
+        void notifyService();
         void updateOverrideUid(uid_t uid, bool active, bool insert);
-        void updateUidCache(uid_t uid, bool active, bool insert);
-        void updateUidLocked(std::unordered_map<uid_t, bool> *uids,
-                uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive);
+        void updateUidLocked(std::unordered_map<uid_t, std::pair<bool, int>> *uids,
+                             uid_t uid, bool active, int state, bool insert);
+        void checkRegistered();
 
         wp<AudioPolicyService> mService;
         Mutex mLock;
         bool mObserverRegistered;
-        std::unordered_map<uid_t, bool> mOverrideUids;
-        std::unordered_map<uid_t, bool> mCachedUids;
+        std::unordered_map<uid_t, std::pair<bool, int>> mOverrideUids;
+        std::unordered_map<uid_t, std::pair<bool, int>> mCachedUids;
         uid_t mAssistantUid;
         std::vector<uid_t> mA11yUids;
     };
@@ -640,12 +656,11 @@
                           const audio_session_t session, const audio_port_handle_t deviceId,
                           const String16& opPackageName) :
                     AudioClient(attributes, io, uid, pid, session, deviceId),
-                    opPackageName(opPackageName), isConcurrent(false), isVirtualDevice(false) {}
+                    opPackageName(opPackageName), startTimeNs(0) {}
                 ~AudioRecordClient() override = default;
 
         const String16 opPackageName;        // client package name
-        bool isConcurrent;             // is allowed to concurrent capture
-        bool isVirtualDevice;          // uses virtual device: updated by APM::getInputForAttr()
+        nsecs_t startTimeNs;
     };
 
     // --- AudioPlaybackClient ---
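The header changes above widen mOverrideUids and mCachedUids from uid -> bool to uid -> (active, process state), which is the shape getUidState() walks: override entry first, then the cache, with an inactive entry reported as an unknown state. A small sketch of that lookup order over the same map shape; the constants and lookupUidState() are placeholders, and the real code falls back to querying ActivityManager instead of returning unknown:

#include <unordered_map>
#include <utility>

using uid_num = unsigned int;                // local alias to stay self-contained
constexpr int kStateUnknown = -1;            // stands in for PROCESS_STATE_UNKNOWN
constexpr int kStateTop = 2;                 // stands in for PROCESS_STATE_TOP

using UidMap = std::unordered_map<uid_num, std::pair<bool, int>>;  // uid -> (active, procState)

int lookupUidState(const UidMap& overrides, const UidMap& cache, uid_num uid) {
    auto o = overrides.find(uid);
    if (o != overrides.end()) {
        // An active override with a known state wins; otherwise fall back to the cache.
        if (o->second.first && o->second.second != kStateUnknown) return o->second.second;
        if (o->second.first) {
            auto c = cache.find(uid);
            if (c != cache.end()) return c->second.second;
        }
        return kStateUnknown;                // overridden as inactive
    }
    auto c = cache.find(uid);
    if (c != cache.end()) {
        return c->second.first ? c->second.second : kStateUnknown;  // inactive -> unknown
    }
    return kStateUnknown;                    // the real code would ask ActivityManager here
}

int main() {
    UidMap overrides, cache;
    cache[10007] = {true, kStateTop};
    return lookupUidState(overrides, cache, 10007) == kStateTop ? 0 : 1;
}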
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index 513312e..2ccb542 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -41,6 +41,9 @@
   libmedia_helper \
   libutils
 
+LOCAL_HEADER_LIBRARIES := \
+  libmedia_headers
+
 LOCAL_SRC_FILES := \
   systemaudio_tests.cpp \
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 32b07fa..6003607 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2215,6 +2215,8 @@
     sCameraService->removeByClient(this);
     sCameraService->logDisconnected(mCameraIdStr, mClientPid,
             String8(mClientPackageName));
+    sCameraService->mCameraProviderManager->removeRef(CameraProviderManager::DeviceMode::CAMERA,
+            mCameraIdStr.c_str());
 
     sp<IBinder> remote = getRemote();
     if (remote != nullptr) {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 064863f..d332f6e 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -536,6 +536,7 @@
         void onUidGone(uid_t uid, bool disabled);
         void onUidActive(uid_t uid);
         void onUidIdle(uid_t uid, bool disabled);
+        void onUidStateChanged(uid_t uid __unused, int32_t procState __unused, int64_t procStateSeq __unused) {}
 
         void addOverrideUid(uid_t uid, String16 callingPackage, bool active);
         void removeOverrideUid(uid_t uid, String16 callingPackage);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 6da05c3..c1a4c11 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -27,6 +27,8 @@
 #include <camera/CameraUtils.h>
 
 #include "common/CameraDeviceBase.h"
+#include "device3/Camera3Device.h"
+#include "device3/Camera3OutputStream.h"
 #include "api2/CameraDeviceClient.h"
 
 #include <camera_metadata_hidden.h>
@@ -471,6 +473,68 @@
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
+    res = checkOperatingModeLocked(operatingMode);
+    if (!res.isOk()) {
+        return res;
+    }
+
+    status_t err = mDevice->configureStreams(sessionParams, operatingMode);
+    if (err == BAD_VALUE) {
+        String8 msg = String8::format("Camera %s: Unsupported set of inputs/outputs provided",
+                mCameraIdStr.string());
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    } else if (err != OK) {
+        String8 msg = String8::format("Camera %s: Error configuring streams: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+
+    return res;
+}
+
+binder::Status CameraDeviceClient::checkSurfaceTypeLocked(size_t numBufferProducers,
+        bool deferredConsumer, int surfaceType) const {
+    if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+        ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+                __FUNCTION__, numBufferProducers, MAX_SURFACES_PER_STREAM);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+    } else if ((numBufferProducers == 0) && (!deferredConsumer)) {
+        ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "No valid consumers.");
+    }
+
+    bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
+            (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
+    if (deferredConsumer && !validSurfaceType) {
+        ALOGE("%s: Target surface has invalid surfaceType = %d.", __FUNCTION__, surfaceType);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
+    }
+
+    return binder::Status::ok();
+}
+
+binder::Status CameraDeviceClient::checkPhysicalCameraIdLocked(String8 physicalCameraId) {
+    if (physicalCameraId.size() > 0) {
+        std::vector<std::string> physicalCameraIds;
+        bool logicalCamera =
+            mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
+        if (!logicalCamera ||
+                std::find(physicalCameraIds.begin(), physicalCameraIds.end(),
+                    physicalCameraId.string()) == physicalCameraIds.end()) {
+            String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
+                    mCameraIdStr.string(), physicalCameraId.string());
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+    }
+
+    return binder::Status::ok();
+}
+
+binder::Status CameraDeviceClient::checkOperatingModeLocked(int operatingMode) const {
     if (operatingMode < 0) {
         String8 msg = String8::format(
             "Camera %s: Invalid operating mode %d requested", mCameraIdStr.string(), operatingMode);
@@ -479,7 +543,6 @@
                 msg.string());
     }
 
-    // Sanitize the high speed session against necessary capability bit.
     bool isConstrainedHighSpeed = (operatingMode == ICameraDeviceUser::CONSTRAINED_HIGH_SPEED_MODE);
     if (isConstrainedHighSpeed) {
         CameraMetadata staticInfo = mDevice->info();
@@ -502,17 +565,163 @@
         }
     }
 
-    status_t err = mDevice->configureStreams(sessionParams, operatingMode);
-    if (err == BAD_VALUE) {
-        String8 msg = String8::format("Camera %s: Unsupported set of inputs/outputs provided",
-                mCameraIdStr.string());
+    return binder::Status::ok();
+}
+
+void CameraDeviceClient::mapStreamInfo(const OutputStreamInfo &streamInfo,
+            camera3_stream_rotation_t rotation, String8 physicalId,
+            hardware::camera::device::V3_4::Stream *stream /*out*/) {
+    if (stream == nullptr) {
+        return;
+    }
+
+    stream->v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
+    stream->v3_2.width = streamInfo.width;
+    stream->v3_2.height = streamInfo.height;
+    stream->v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
+    auto u = streamInfo.consumerUsage;
+    camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
+    stream->v3_2.usage = Camera3Device::mapToConsumerUsage(u);
+    stream->v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
+    stream->v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
+    stream->physicalCameraId = std::string(physicalId.string());
+    stream->bufferSize = 0;
+}
+
+binder::Status CameraDeviceClient::isSessionConfigurationSupported(
+        const SessionConfiguration& sessionConfiguration, bool *status /*out*/) {
+    ATRACE_CALL();
+
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
+
+    auto operatingMode = sessionConfiguration.getOperatingMode();
+    res = checkOperatingModeLocked(operatingMode);
+    if (!res.isOk()) {
+        return res;
+    }
+
+    if (status == nullptr) {
+        String8 msg = String8::format("Camera %s: Invalid status!", mCameraIdStr.string());
         ALOGE("%s: %s", __FUNCTION__, msg.string());
-        res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
-    } else if (err != OK) {
-        String8 msg = String8::format("Camera %s: Error configuring streams: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+
+    hardware::camera::device::V3_4::StreamConfiguration streamConfiguration;
+    auto ret = Camera3Device::mapToStreamConfigurationMode(
+            static_cast<camera3_stream_configuration_mode_t> (operatingMode),
+            /*out*/ &streamConfiguration.operationMode);
+    if (ret != OK) {
+        String8 msg = String8::format(
+            "Camera %s: Failed mapping operating mode %d requested: %s (%d)", mCameraIdStr.string(),
+            operatingMode, strerror(-ret), ret);
         ALOGE("%s: %s", __FUNCTION__, msg.string());
-        res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                msg.string());
+    }
+
+    bool isInputValid = (sessionConfiguration.getInputWidth() > 0) &&
+            (sessionConfiguration.getInputHeight() > 0) &&
+            (sessionConfiguration.getInputFormat() > 0);
+    auto outputConfigs = sessionConfiguration.getOutputConfigurations();
+    size_t streamCount = outputConfigs.size();
+    streamCount = isInputValid ? streamCount + 1 : streamCount;
+    streamConfiguration.streams.resize(streamCount);
+    size_t streamIdx = 0;
+    if (isInputValid) {
+        streamConfiguration.streams[streamIdx++] = {{/*streamId*/0,
+                hardware::camera::device::V3_2::StreamType::INPUT,
+                static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
+                static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
+                Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
+                /*usage*/ 0, HAL_DATASPACE_UNKNOWN,
+                hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
+                /*physicalId*/ nullptr, /*bufferSize*/0};
+    }
+
+    for (const auto &it : outputConfigs) {
+        const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+            it.getGraphicBufferProducers();
+        bool deferredConsumer = it.isDeferred();
+        String8 physicalCameraId = String8(it.getPhysicalCameraId());
+        size_t numBufferProducers = bufferProducers.size();
+        bool isStreamInfoValid = false;
+        OutputStreamInfo streamInfo;
+
+        res = checkSurfaceTypeLocked(numBufferProducers, deferredConsumer, it.getSurfaceType());
+        if (!res.isOk()) {
+            return res;
+        }
+
+        res = checkPhysicalCameraIdLocked(physicalCameraId);
+        if (!res.isOk()) {
+            return res;
+        }
+
+        if (deferredConsumer) {
+            streamInfo.width = it.getWidth();
+            streamInfo.height = it.getHeight();
+            streamInfo.format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+            streamInfo.dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
+            auto surfaceType = it.getSurfaceType();
+            streamInfo.consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
+            if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
+                streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
+            }
+            mapStreamInfo(streamInfo, CAMERA3_STREAM_ROTATION_0, physicalCameraId,
+                    &streamConfiguration.streams[streamIdx++]);
+            isStreamInfoValid = true;
+
+            if (numBufferProducers == 0) {
+                continue;
+            }
+        }
+
+        for (auto& bufferProducer : bufferProducers) {
+            sp<Surface> surface;
+            res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+                    physicalCameraId);
+
+            if (!res.isOk())
+                return res;
+
+            if (!isStreamInfoValid) {
+                mapStreamInfo(streamInfo, static_cast<camera3_stream_rotation_t> (it.getRotation()),
+                        physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+                isStreamInfoValid = true;
+            }
+        }
+    }
+
+    *status = false;
+    ret = mProviderManager->isSessionConfigurationSupported(mCameraIdStr.string(),
+            streamConfiguration, status);
+    switch (ret) {
+        case OK:
+            // Expected, do nothing.
+            break;
+        case INVALID_OPERATION: {
+                String8 msg = String8::format(
+                        "Camera %s: Session configuration query not supported!",
+                        mCameraIdStr.string());
+                ALOGD("%s: %s", __FUNCTION__, msg.string());
+                res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+            }
+
+            break;
+        default: {
+                String8 msg = String8::format("Camera %s: Error: %s (%d)", mCameraIdStr.string(),
+                        strerror(-ret), ret);
+                ALOGE("%s: %s", __FUNCTION__, msg.string());
+                res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                        msg.string());
+            }
     }
 
     return res;
@@ -605,40 +814,23 @@
     bool deferredConsumer = outputConfiguration.isDeferred();
     bool isShared = outputConfiguration.isShared();
     String8 physicalCameraId = String8(outputConfiguration.getPhysicalCameraId());
-
-    if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
-        ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
-              __FUNCTION__, bufferProducers.size(), MAX_SURFACES_PER_STREAM);
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
-    }
     bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
-    int surfaceType = outputConfiguration.getSurfaceType();
-    bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
-            (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
 
-    if (deferredConsumer && !validSurfaceType) {
-        ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
-                __FUNCTION__, bufferProducers[0].get(), surfaceType);
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
+    res = checkSurfaceTypeLocked(numBufferProducers, deferredConsumer,
+            outputConfiguration.getSurfaceType());
+    if (!res.isOk()) {
+        return res;
     }
 
     if (!mDevice.get()) {
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
-    if (physicalCameraId.size() > 0) {
-        std::vector<std::string> physicalCameraIds;
-        bool logicalCamera =
-                mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
-        if (!logicalCamera ||
-                std::find(physicalCameraIds.begin(), physicalCameraIds.end(),
-                physicalCameraId.string()) == physicalCameraIds.end()) {
-            String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
-                    mCameraIdStr.string(), physicalCameraId.string());
-            ALOGE("%s: %s", __FUNCTION__, msg.string());
-            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
-        }
+    res = checkPhysicalCameraIdLocked(physicalCameraId);
+    if (!res.isOk()) {
+        return res;
     }
+
     std::vector<sp<Surface>> surfaces;
     std::vector<sp<IBinder>> binders;
     status_t err;
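isSessionConfigurationSupported() above sizes the HIDL stream list before filling it: one entry per OutputConfiguration, whether the consumer is deferred or already has surfaces attached (extra buffer producers share the entry), plus one more when the session declares a valid input, i.e. width, height and format are all positive. A tiny standalone sketch of that sizing rule with simplified stand-in types:

#include <cstddef>
#include <vector>

// Only the fields the sizing rule cares about.
struct OutputCfg {
    bool deferred;              // deferred consumer (SurfaceView/SurfaceTexture attached later)
    std::size_t numProducers;   // IGraphicBufferProducers already attached
};

struct Input {
    int width = 0, height = 0, format = 0;
    bool valid() const { return width > 0 && height > 0 && format > 0; }
};

std::size_t expectedStreamCount(const Input& input, const std::vector<OutputCfg>& outputs) {
    // Each OutputConfiguration maps to exactly one HAL stream; surfaces beyond
    // the first share it, and a deferred output still occupies one slot.
    std::size_t count = outputs.size();
    if (input.valid()) {
        count += 1;             // the optional input stream goes first in the real code
    }
    return count;
}

int main() {
    Input noInput;                                             // invalid, so no input stream
    std::vector<OutputCfg> outputs = {{true, 0}, {false, 2}};  // one deferred, one with two surfaces
    return expectedStreamCount(noInput, outputs) == 2 ? 0 : 1;
}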
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 09ce977..17a0983 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -20,6 +20,7 @@
 #include <android/hardware/camera2/BnCameraDeviceUser.h>
 #include <android/hardware/camera2/ICameraDeviceCallbacks.h>
 #include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
 
 #include "CameraService.h"
@@ -89,6 +90,12 @@
     virtual binder::Status endConfigure(int operatingMode,
             const hardware::camera2::impl::CameraMetadataNative& sessionParams) override;
 
+    // Verify specific session configuration.
+    virtual binder::Status isSessionConfigurationSupported(
+            const SessionConfiguration& sessionConfiguration,
+            /*out*/
+            bool* streamStatus) override;
+
     // Returns -EBUSY if device is not idle or in error state
     virtual binder::Status deleteStream(int streamId) override;
 
@@ -230,6 +237,13 @@
 
     /** Utility members */
     binder::Status checkPidStatus(const char* checkLocation);
+    binder::Status checkOperatingModeLocked(int operatingMode) const;
+    binder::Status checkPhysicalCameraIdLocked(String8 physicalCameraId);
+    binder::Status checkSurfaceTypeLocked(size_t numBufferProducers, bool deferredConsumer,
+            int surfaceType) const;
+    static void mapStreamInfo(const OutputStreamInfo &streamInfo,
+            camera3_stream_rotation_t rotation, String8 physicalId,
+            hardware::camera::device::V3_4::Stream *stream /*out*/);
     bool enforceRequestPermissions(CameraMetadata& metadata);
 
     // Find the square of the euclidean distance between two points
@@ -300,7 +314,7 @@
     // stream ID -> outputStreamInfo mapping
     std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
 
-    static const int32_t MAX_SURFACES_PER_STREAM = 2;
+    static const int32_t MAX_SURFACES_PER_STREAM = 4;
     sp<CameraProviderManager> mProviderManager;
 };
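The header above raises MAX_SURFACES_PER_STREAM from 2 to 4 and factors the surface validation into checkSurfaceTypeLocked(). A compact sketch of the three rules that check enforces, using plain enums and bools instead of the binder status and OutputConfiguration constants:

#include <cstddef>

enum SurfaceType { kSurfaceView = 0, kSurfaceTexture = 1, kOther = 2 };  // stand-ins
constexpr std::size_t kMaxSurfacesPerStream = 4;   // raised from 2 in this change

// Returns true when the combination is acceptable, mirroring checkSurfaceTypeLocked().
bool surfaceComboOk(std::size_t numProducers, bool deferredConsumer, SurfaceType type) {
    if (numProducers > kMaxSurfacesPerStream) return false;   // too many shared surfaces
    if (numProducers == 0 && !deferredConsumer) return false; // need at least one consumer
    bool validDeferredType = (type == kSurfaceView || type == kSurfaceTexture);
    if (deferredConsumer && !validDeferredType) return false; // a deferred output must be a
                                                              // SurfaceView or SurfaceTexture
    return true;
}

int main() {
    bool ok1 = surfaceComboOk(0, true, kSurfaceView);     // deferred, no producers yet: allowed
    bool ok2 = surfaceComboOk(5, false, kSurfaceTexture); // five shared surfaces: rejected
    return (ok1 && !ok2) ? 0 : 1;
}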
 
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 2542ab2..a9cbe72 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -24,23 +24,37 @@
 
 #include <algorithm>
 #include <chrono>
+#include <future>
 #include <inttypes.h>
 #include <hardware/camera_common.h>
 #include <hidl/ServiceManagement.h>
 #include <functional>
 #include <camera_metadata_hidden.h>
 #include <android-base/parseint.h>
+#include <android-base/logging.h>
+#include <cutils/properties.h>
+#include <hwbinder/IPCThreadState.h>
+#include <utils/Trace.h>
 
 namespace android {
 
 using namespace ::android::hardware::camera;
 using namespace ::android::hardware::camera::common::V1_0;
+using std::literals::chrono_literals::operator""s;
 
 namespace {
 // Hardcoded name for the passthrough HAL implementation, since it can't be discovered via the
 // service manager
 const std::string kLegacyProviderName("legacy/0");
 const std::string kExternalProviderName("external/0");
+const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
+
+// The extra amount of time to hold a reference to an ICameraProvider after it is no longer needed.
+// Hold the reference for this extra time so that if the camera is unreferenced and then referenced
+// again quickly, we do not let the HAL exit and then need to immediately restart it. An example
+// when this could happen is switching from a front-facing to a rear-facing camera. If the HAL were
+// to exit during the camera switch, the camera could appear janky to the user.
+const std::chrono::system_clock::duration kCameraKeepAliveDelay = 3s;
 
 } // anonymous namespace
 
@@ -74,6 +88,8 @@
     addProviderLocked(kLegacyProviderName, /*expected*/ false);
     addProviderLocked(kExternalProviderName, /*expected*/ false);
 
+    IPCThreadState::self()->flushCommands();
+
     return OK;
 }
 
@@ -177,6 +193,19 @@
     return deviceInfo->getCameraInfo(info);
 }
 
+status_t CameraProviderManager::isSessionConfigurationSupported(const std::string& id,
+        const hardware::camera::device::V3_4::StreamConfiguration &configuration,
+        bool *status /*out*/) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) {
+        return NAME_NOT_FOUND;
+    }
+
+    return deviceInfo->isSessionConfigurationSupported(configuration, status);
+}
+
 status_t CameraProviderManager::getCameraCharacteristics(const std::string &id,
         CameraMetadata* characteristics) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -206,26 +235,15 @@
     return OK;
 }
 
-bool CameraProviderManager::supportSetTorchMode(const std::string &id) {
+bool CameraProviderManager::supportSetTorchMode(const std::string &id) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
-    bool support = false;
     for (auto& provider : mProviders) {
         auto deviceInfo = findDeviceInfoLocked(id);
         if (deviceInfo != nullptr) {
-            auto ret = provider->mInterface->isSetTorchModeSupported(
-                [&support](auto status, bool supported) {
-                    if (status == Status::OK) {
-                        support = supported;
-                    }
-                });
-            if (!ret.isOk()) {
-                ALOGE("%s: Transaction error checking torch mode support '%s': %s",
-                        __FUNCTION__, provider->mProviderName.c_str(), ret.description().c_str());
-            }
-            break;
+            return provider->mSetTorchModeSupported;
         }
     }
-    return support;
+    return false;
 }
 
 status_t CameraProviderManager::setTorchMode(const std::string &id, bool enabled) {
@@ -234,6 +252,15 @@
     auto deviceInfo = findDeviceInfoLocked(id);
     if (deviceInfo == nullptr) return NAME_NOT_FOUND;
 
+    // Pass the camera ID to start interface so that it will save it to the map of ICameraProviders
+    // that are currently in use.
+    const sp<provider::V2_4::ICameraProvider> interface =
+            deviceInfo->mParentProvider->startProviderInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    saveRef(DeviceMode::TORCH, deviceInfo->mId, interface);
+
     return deviceInfo->setTorchMode(enabled);
 }
 
@@ -241,37 +268,7 @@
     sp<VendorTagDescriptorCache> tagCache = new VendorTagDescriptorCache();
 
     for (auto& provider : mProviders) {
-        hardware::hidl_vec<VendorTagSection> vts;
-        Status status;
-        hardware::Return<void> ret;
-        ret = provider->mInterface->getVendorTags(
-            [&](auto s, const auto& vendorTagSecs) {
-                status = s;
-                if (s == Status::OK) {
-                    vts = vendorTagSecs;
-                }
-        });
-        if (!ret.isOk()) {
-            ALOGE("%s: Transaction error getting vendor tags from provider '%s': %s",
-                    __FUNCTION__, provider->mProviderName.c_str(), ret.description().c_str());
-            return DEAD_OBJECT;
-        }
-        if (status != Status::OK) {
-            return mapToStatusT(status);
-        }
-
-        // Read all vendor tag definitions into a descriptor
-        sp<VendorTagDescriptor> desc;
-        status_t res;
-        if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/desc))
-                != OK) {
-            ALOGE("%s: Could not generate descriptor from vendor tag operations,"
-                  "received error %s (%d). Camera clients will not be able to use"
-                  "vendor tags", __FUNCTION__, strerror(res), res);
-            return res;
-        }
-
-        tagCache->addVendorDescriptor(provider->mProviderTagid, desc);
+        tagCache->addVendorDescriptor(provider->mProviderTagid, provider->mVendorTagDescriptor);
     }
 
     VendorTagDescriptorCache::setAsGlobalVendorTagCache(tagCache);
@@ -280,9 +277,9 @@
 }
 
 status_t CameraProviderManager::openSession(const std::string &id,
-        const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
+        const sp<device::V3_2::ICameraDeviceCallback>& callback,
         /*out*/
-        sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session) {
+        sp<device::V3_2::ICameraDeviceSession> *session) {
 
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
@@ -291,10 +288,22 @@
     if (deviceInfo == nullptr) return NAME_NOT_FOUND;
 
     auto *deviceInfo3 = static_cast<ProviderInfo::DeviceInfo3*>(deviceInfo);
+    const sp<provider::V2_4::ICameraProvider> provider =
+            deviceInfo->mParentProvider->startProviderInterface();
+    if (provider == nullptr) {
+        return DEAD_OBJECT;
+    }
+    saveRef(DeviceMode::CAMERA, id, provider);
 
     Status status;
     hardware::Return<void> ret;
-    ret = deviceInfo3->mInterface->open(callback, [&status, &session]
+    auto interface = deviceInfo3->startDeviceInterface<
+            CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+
+    ret = interface->open(callback, [&status, &session]
             (Status s, const sp<device::V3_2::ICameraDeviceSession>& cameraSession) {
                 status = s;
                 if (status == Status::OK) {
@@ -302,6 +311,7 @@
                 }
             });
     if (!ret.isOk()) {
+        removeRef(DeviceMode::CAMERA, id);
         ALOGE("%s: Transaction error opening a session for camera device %s: %s",
                 __FUNCTION__, id.c_str(), ret.description().c_str());
         return DEAD_OBJECT;
@@ -310,9 +320,9 @@
 }
 
 status_t CameraProviderManager::openSession(const std::string &id,
-        const sp<hardware::camera::device::V1_0::ICameraDeviceCallback>& callback,
+        const sp<device::V1_0::ICameraDeviceCallback>& callback,
         /*out*/
-        sp<hardware::camera::device::V1_0::ICameraDevice> *session) {
+        sp<device::V1_0::ICameraDevice> *session) {
 
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
@@ -321,19 +331,82 @@
     if (deviceInfo == nullptr) return NAME_NOT_FOUND;
 
     auto *deviceInfo1 = static_cast<ProviderInfo::DeviceInfo1*>(deviceInfo);
+    const sp<provider::V2_4::ICameraProvider> provider =
+            deviceInfo->mParentProvider->startProviderInterface();
+    if (provider == nullptr) {
+        return DEAD_OBJECT;
+    }
+    saveRef(DeviceMode::CAMERA, id, provider);
 
-    hardware::Return<Status> status = deviceInfo1->mInterface->open(callback);
+    auto interface = deviceInfo1->startDeviceInterface<
+            CameraProviderManager::ProviderInfo::DeviceInfo1::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    hardware::Return<Status> status = interface->open(callback);
     if (!status.isOk()) {
+        removeRef(DeviceMode::CAMERA, id);
         ALOGE("%s: Transaction error opening a session for camera device %s: %s",
                 __FUNCTION__, id.c_str(), status.description().c_str());
         return DEAD_OBJECT;
     }
     if (status == Status::OK) {
-        *session = deviceInfo1->mInterface;
+        *session = interface;
     }
     return mapToStatusT(status);
 }
 
+void CameraProviderManager::saveRef(DeviceMode usageType, const std::string &cameraId,
+        sp<provider::V2_4::ICameraProvider> provider) {
+    if (!kEnableLazyHal) {
+        return;
+    }
+    ALOGI("Saving camera provider %s for camera device %s", provider->descriptor, cameraId.c_str());
+    std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
+    std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *primaryMap, *alternateMap;
+    if (usageType == DeviceMode::TORCH) {
+        primaryMap = &mTorchProviderByCameraId;
+        alternateMap = &mCameraProviderByCameraId;
+    } else {
+        primaryMap = &mCameraProviderByCameraId;
+        alternateMap = &mTorchProviderByCameraId;
+    }
+    auto id = cameraId.c_str();
+    (*primaryMap)[id] = provider;
+    auto search = alternateMap->find(id);
+    if (search != alternateMap->end()) {
+        ALOGW("%s: Camera device %s is using both torch mode and camera mode simultaneously. "
+                "That should not be possible", __FUNCTION__, id);
+    }
+    ALOGV("%s: Camera device %s connected", __FUNCTION__, id);
+}
+
+void CameraProviderManager::removeRef(DeviceMode usageType, const std::string &cameraId) {
+    if (!kEnableLazyHal) {
+        return;
+    }
+    ALOGI("Removing camera device %s", cameraId.c_str());
+    std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *providerMap;
+    if (usageType == DeviceMode::TORCH) {
+        providerMap = &mTorchProviderByCameraId;
+    } else {
+        providerMap = &mCameraProviderByCameraId;
+    }
+    std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
+    auto search = providerMap->find(cameraId.c_str());
+    if (search != providerMap->end()) {
+        auto ptr = search->second;
+        auto future = std::async(std::launch::async, [ptr] {
+            std::this_thread::sleep_for(kCameraKeepAliveDelay);
+            IPCThreadState::self()->flushCommands();
+        });
+        providerMap->erase(cameraId.c_str());
+    } else {
+        ALOGE("%s: Asked to remove reference for camera %s, but no reference to it was found. This "
+                "could mean removeRef was called twice for the same camera ID.", __FUNCTION__,
+                cameraId.c_str());
+    }
+}
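removeRef() above does not drop the last strong reference immediately: the task it launches keeps the provider alive for kCameraKeepAliveDelay and only then flushes binder commands, so a quick release-and-reopen (for example switching between front and rear cameras) does not restart the HAL. A minimal standalone sketch of that delayed-release idea; it uses std::shared_ptr and a detached std::thread purely to stay self-contained, whereas the real code holds an sp<ICameraProvider>, uses std::async, and flushes hwbinder commands:

#include <chrono>
#include <cstdio>
#include <memory>
#include <thread>

struct Provider {                      // stand-in for the HAL provider proxy
    ~Provider() { std::puts("provider released"); }
};

// Hold the last strong reference a little longer before letting the HAL exit,
// so a release followed quickly by a re-acquire reuses the running process.
void releaseWithKeepAlive(std::shared_ptr<Provider> ref,
                          std::chrono::seconds keepAlive = std::chrono::seconds(3)) {
    std::thread([ref = std::move(ref), keepAlive] {
        std::this_thread::sleep_for(keepAlive);   // keep 'ref' alive for the grace period
        // dropping 'ref' here lets the provider be destroyed; the real code then
        // flushes binder commands so the driver sees the reference decrement
    }).detach();
}

int main() {
    auto p = std::make_shared<Provider>();
    releaseWithKeepAlive(std::move(p), std::chrono::seconds(1));
    std::this_thread::sleep_for(std::chrono::seconds(2));   // wait to observe the release
}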
 
 hardware::Return<void> CameraProviderManager::onRegistration(
         const hardware::hidl_string& /*fqName*/,
@@ -351,6 +424,8 @@
         listener->onNewProviderRegistered();
     }
 
+    IPCThreadState::self()->flushCommands();
+
     return hardware::Return<void>();
 }
 
@@ -598,9 +673,8 @@
         }
     }
 
-    sp<ProviderInfo> providerInfo =
-            new ProviderInfo(newProvider, interface, this);
-    status_t res = providerInfo->initialize();
+    sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
+    status_t res = providerInfo->initialize(interface);
     if (res != OK) {
         return res;
     }
@@ -652,27 +726,26 @@
 
 CameraProviderManager::ProviderInfo::ProviderInfo(
         const std::string &providerName,
-        sp<provider::V2_4::ICameraProvider>& interface,
         CameraProviderManager *manager) :
         mProviderName(providerName),
-        mInterface(interface),
         mProviderTagid(generateVendorTagId(providerName)),
         mUniqueDeviceCount(0),
         mManager(manager) {
     (void) mManager;
 }
 
-status_t CameraProviderManager::ProviderInfo::initialize() {
+status_t CameraProviderManager::ProviderInfo::initialize(
+        sp<provider::V2_4::ICameraProvider>& interface) {
     status_t res = parseProviderName(mProviderName, &mType, &mId);
     if (res != OK) {
         ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
         return BAD_VALUE;
     }
     ALOGI("Connecting to new camera provider: %s, isRemote? %d",
-            mProviderName.c_str(), mInterface->isRemote());
+            mProviderName.c_str(), interface->isRemote());
     // cameraDeviceStatusChange callbacks may be called (and causing new devices added)
     // before setCallback returns
-    hardware::Return<Status> status = mInterface->setCallback(this);
+    hardware::Return<Status> status = interface->setCallback(this);
     if (!status.isOk()) {
         ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
                 __FUNCTION__, mProviderName.c_str(), status.description().c_str());
@@ -684,7 +757,7 @@
         return mapToStatusT(status);
     }
 
-    hardware::Return<bool> linked = mInterface->linkToDeath(this, /*cookie*/ mId);
+    hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
     if (!linked.isOk()) {
         ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
                 __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
@@ -696,7 +769,7 @@
 
     // Get initial list of camera devices, if any
     std::vector<std::string> devices;
-    hardware::Return<void> ret = mInterface->getCameraIdList([&status, this, &devices](
+    hardware::Return<void> ret = interface->getCameraIdList([&status, this, &devices](
             Status idStatus,
             const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
         status = idStatus;
@@ -725,11 +798,24 @@
         return mapToStatusT(status);
     }
 
+    ret = interface->isSetTorchModeSupported(
+        [this](auto status, bool supported) {
+            if (status == Status::OK) {
+                mSetTorchModeSupported = supported;
+            }
+        });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error checking torch mode support '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+
+    mIsRemote = interface->isRemote();
+
     sp<StatusListener> listener = mManager->getStatusListener();
     for (auto& device : devices) {
         std::string id;
-        status_t res = addDevice(device,
-                hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT, &id);
+        status_t res = addDevice(device, common::V1_0::CameraDeviceStatus::PRESENT, &id);
         if (res != OK) {
             ALOGE("%s: Unable to enumerate camera device '%s': %s (%d)",
                     __FUNCTION__, device.c_str(), strerror(-res), res);
@@ -737,13 +823,53 @@
         }
     }
 
+    res = setUpVendorTags();
+    if (res != OK) {
+        ALOGE("%s: Unable to set up vendor tags from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return res;
+    }
+
     ALOGI("Camera provider %s ready with %zu camera devices",
             mProviderName.c_str(), mDevices.size());
 
     mInitialized = true;
+    if (!kEnableLazyHal) {
+        // Save HAL reference indefinitely
+        mSavedInterface = interface;
+    }
     return OK;
 }
 
+const sp<provider::V2_4::ICameraProvider>
+CameraProviderManager::ProviderInfo::startProviderInterface() {
+    ATRACE_CALL();
+    ALOGI("Request to start camera provider: %s", mProviderName.c_str());
+    if (mSavedInterface != nullptr) {
+        return mSavedInterface;
+    }
+    auto interface = mActiveInterface.promote();
+    if (interface == nullptr) {
+        ALOGI("Could not promote, calling getService(%s)", mProviderName.c_str());
+        interface = mManager->mServiceProxy->getService(mProviderName);
+        interface->setCallback(this);
+        hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
+        if (!linked.isOk()) {
+            ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
+                    __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
+            mManager->removeProvider(mProviderName);
+            return nullptr;
+        } else if (!linked) {
+            ALOGW("%s: Unable to link to provider '%s' death notifications",
+                    __FUNCTION__, mProviderName.c_str());
+        }
+        mActiveInterface = interface;
+    } else {
+        ALOGI("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
+    }
+    return interface;
+}
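When the lazy HAL path is enabled, startProviderInterface() above reuses the provider if anything still holds it (promoting the weak mActiveInterface) and otherwise re-acquires it through getService() and remembers it weakly, so the provider can exit once every user drops its reference. The same idea expressed with std::weak_ptr; ProviderHandle and acquireProvider() are placeholders for the real manager and getService():

#include <memory>
#include <string>

struct Provider { std::string name; };

// Stand-in for getService(): the sketch simply creates a fresh instance.
std::shared_ptr<Provider> acquireProvider(const std::string& name) {
    return std::make_shared<Provider>(Provider{name});
}

class ProviderHandle {
  public:
    explicit ProviderHandle(std::string name) : mName(std::move(name)) {}

    // Reuse the live instance if anyone still holds it, otherwise acquire a new
    // one and remember it weakly so later callers share it without pinning it.
    std::shared_ptr<Provider> start() {
        if (auto live = mActive.lock()) {
            return live;                       // already in use, share it
        }
        auto fresh = acquireProvider(mName);   // real code: getService() + setCallback/linkToDeath
        mActive = fresh;
        return fresh;
    }

  private:
    std::string mName;
    std::weak_ptr<Provider> mActive;           // plays the role of wp<ICameraProvider> mActiveInterface
};

int main() {
    ProviderHandle handle("legacy/0");
    auto a = handle.start();
    auto b = handle.start();                   // same instance while 'a' is still alive
    return (a == b) ? 0 : 1;
}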
+
 const std::string& CameraProviderManager::ProviderInfo::getType() const {
     return mType;
 }
@@ -825,7 +951,7 @@
 
 status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
     dprintf(fd, "== Camera Provider HAL %s (v2.4, %s) static info: %zu devices: ==\n",
-            mProviderName.c_str(), mInterface->isRemote() ? "remote" : "passthrough",
+            mProviderName.c_str(), mIsRemote ? "remote" : "passthrough",
             mDevices.size());
 
     for (auto& device : mDevices) {
@@ -953,6 +1079,9 @@
                         torchStatusToString(newStatus));
                 id = deviceInfo->mId;
                 known = true;
+                if (TorchModeStatus::AVAILABLE_ON != newStatus) {
+                    mManager->removeRef(DeviceMode::TORCH, id);
+                }
                 break;
             }
         }
@@ -981,15 +1110,55 @@
     mManager->removeProvider(mProviderName);
 }
 
+status_t CameraProviderManager::ProviderInfo::setUpVendorTags() {
+    if (mVendorTagDescriptor != nullptr) {
+        return OK;
+    }
+
+    hardware::hidl_vec<VendorTagSection> vts;
+    Status status;
+    hardware::Return<void> ret;
+    const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    ret = interface->getVendorTags(
+        [&](auto s, const auto& vendorTagSecs) {
+            status = s;
+            if (s == Status::OK) {
+                vts = vendorTagSecs;
+            }
+    });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error getting vendor tags from provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        return mapToStatusT(status);
+    }
+
+    // Read all vendor tag definitions into a descriptor
+    status_t res;
+    if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/mVendorTagDescriptor))
+            != OK) {
+        ALOGE("%s: Could not generate descriptor from vendor tag operations,"
+                "received error %s (%d). Camera clients will not be able to use"
+                "vendor tags", __FUNCTION__, strerror(res), res);
+        return res;
+    }
+
+    return OK;
+}
+
 template<class DeviceInfoT>
 std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
     CameraProviderManager::ProviderInfo::initializeDeviceInfo(
         const std::string &name, const metadata_vendor_id_t tagId,
-        const std::string &id, uint16_t minorVersion) const {
+        const std::string &id, uint16_t minorVersion) {
     Status status;
 
     auto cameraInterface =
-            getDeviceInterface<typename DeviceInfoT::InterfaceT>(name);
+            startDeviceInterface<typename DeviceInfoT::InterfaceT>(name);
     if (cameraInterface == nullptr) return nullptr;
 
     CameraResourceCost resourceCost;
@@ -1016,13 +1185,13 @@
     }
 
     return std::unique_ptr<DeviceInfo>(
-        new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
+        new DeviceInfoT(name, tagId, id, minorVersion, resourceCost, this,
                 mProviderPublicCameraIds, cameraInterface));
 }
 
 template<class InterfaceT>
 sp<InterfaceT>
-CameraProviderManager::ProviderInfo::getDeviceInterface(const std::string &name) const {
+CameraProviderManager::ProviderInfo::startDeviceInterface(const std::string &name) {
     ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
             name.c_str(), InterfaceT::version.get_major());
     return nullptr;
@@ -1030,12 +1199,16 @@
 
 template<>
 sp<device::V1_0::ICameraDevice>
-CameraProviderManager::ProviderInfo::getDeviceInterface
-        <device::V1_0::ICameraDevice>(const std::string &name) const {
+CameraProviderManager::ProviderInfo::startDeviceInterface
+        <device::V1_0::ICameraDevice>(const std::string &name) {
     Status status;
     sp<device::V1_0::ICameraDevice> cameraInterface;
     hardware::Return<void> ret;
-    ret = mInterface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
+    const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
+    if (interface == nullptr) {
+        return nullptr;
+    }
+    ret = interface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
         Status s, sp<device::V1_0::ICameraDevice> interface) {
                 status = s;
                 cameraInterface = interface;
@@ -1055,12 +1228,16 @@
 
 template<>
 sp<device::V3_2::ICameraDevice>
-CameraProviderManager::ProviderInfo::getDeviceInterface
-        <device::V3_2::ICameraDevice>(const std::string &name) const {
+CameraProviderManager::ProviderInfo::startDeviceInterface
+        <device::V3_2::ICameraDevice>(const std::string &name) {
     Status status;
     sp<device::V3_2::ICameraDevice> cameraInterface;
     hardware::Return<void> ret;
-    ret = mInterface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
+    const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
+    if (interface == nullptr) {
+        return nullptr;
+    }
+    ret = interface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
         Status s, sp<device::V3_2::ICameraDevice> interface) {
                 status = s;
                 cameraInterface = interface;
@@ -1081,6 +1258,18 @@
 CameraProviderManager::ProviderInfo::DeviceInfo::~DeviceInfo() {}
 
 template<class InterfaceT>
+sp<InterfaceT> CameraProviderManager::ProviderInfo::DeviceInfo::startDeviceInterface() {
+    sp<InterfaceT> device;
+    ATRACE_CALL();
+    if (mSavedInterface == nullptr) {
+        device = mParentProvider->startDeviceInterface<InterfaceT>(mName);
+    } else {
+        device = (InterfaceT *) mSavedInterface.get();
+    }
+    return device;
+}
+
+template<class InterfaceT>
 status_t CameraProviderManager::ProviderInfo::DeviceInfo::setTorchMode(InterfaceT& interface,
         bool enabled) {
     Status s = interface->setTorchMode(enabled ? TorchMode::ON : TorchMode::OFF);
@@ -1091,31 +1280,31 @@
         const metadata_vendor_id_t tagId, const std::string &id,
         uint16_t minorVersion,
         const CameraResourceCost& resourceCost,
+        sp<ProviderInfo> parentProvider,
         const std::vector<std::string>& publicCameraIds,
         sp<InterfaceT> interface) :
         DeviceInfo(name, tagId, id, hardware::hidl_version{1, minorVersion},
-                   publicCameraIds, resourceCost),
-        mInterface(interface) {
+                   publicCameraIds, resourceCost, parentProvider) {
     // Get default parameters and initialize flash unit availability
     // Requires powering on the camera device
-    hardware::Return<Status> status = mInterface->open(nullptr);
+    hardware::Return<Status> status = interface->open(nullptr);
     if (!status.isOk()) {
         ALOGE("%s: Transaction error opening camera device %s to check for a flash unit: %s",
-                __FUNCTION__, mId.c_str(), status.description().c_str());
+                __FUNCTION__, id.c_str(), status.description().c_str());
         return;
     }
     if (status != Status::OK) {
         ALOGE("%s: Unable to open camera device %s to check for a flash unit: %s", __FUNCTION__,
-                mId.c_str(), CameraProviderManager::statusToString(status));
+                id.c_str(), CameraProviderManager::statusToString(status));
         return;
     }
     hardware::Return<void> ret;
-    ret = mInterface->getParameters([this](const hardware::hidl_string& parms) {
+    ret = interface->getParameters([this](const hardware::hidl_string& parms) {
                 mDefaultParameters.unflatten(String8(parms.c_str()));
             });
     if (!ret.isOk()) {
         ALOGE("%s: Transaction error reading camera device %s params to check for a flash unit: %s",
-                __FUNCTION__, mId.c_str(), status.description().c_str());
+                __FUNCTION__, id.c_str(), status.description().c_str());
         return;
     }
     const char *flashMode =
@@ -1124,27 +1313,43 @@
         mHasFlashUnit = true;
     }
 
-    ret = mInterface->close();
+    status_t res = cacheCameraInfo(interface);
+    if (res != OK) {
+        ALOGE("%s: Could not cache CameraInfo", __FUNCTION__);
+        return;
+    }
+
+    ret = interface->close();
     if (!ret.isOk()) {
         ALOGE("%s: Transaction error closing camera device %s after check for a flash unit: %s",
-                __FUNCTION__, mId.c_str(), status.description().c_str());
+                __FUNCTION__, id.c_str(), status.description().c_str());
+    }
+
+    if (!kEnableLazyHal) {
+        // Save HAL reference indefinitely
+        mSavedInterface = interface;
     }
 }
 
 CameraProviderManager::ProviderInfo::DeviceInfo1::~DeviceInfo1() {}
 
 status_t CameraProviderManager::ProviderInfo::DeviceInfo1::setTorchMode(bool enabled) {
-    return DeviceInfo::setTorchMode(mInterface, enabled);
+    return setTorchModeForDevice<InterfaceT>(enabled);
 }
 
 status_t CameraProviderManager::ProviderInfo::DeviceInfo1::getCameraInfo(
         hardware::CameraInfo *info) const {
     if (info == nullptr) return BAD_VALUE;
+    *info = mInfo;
+    return OK;
+}
 
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::cacheCameraInfo(
+        sp<CameraProviderManager::ProviderInfo::DeviceInfo1::InterfaceT> interface) {
     Status status;
     device::V1_0::CameraInfo cInfo;
     hardware::Return<void> ret;
-    ret = mInterface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
+    ret = interface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
                 status = s;
                 cInfo = camInfo;
             });
@@ -1159,27 +1364,31 @@
 
     switch(cInfo.facing) {
         case device::V1_0::CameraFacing::BACK:
-            info->facing = hardware::CAMERA_FACING_BACK;
+            mInfo.facing = hardware::CAMERA_FACING_BACK;
             break;
         case device::V1_0::CameraFacing::EXTERNAL:
             // Map external to front for legacy API
         case device::V1_0::CameraFacing::FRONT:
-            info->facing = hardware::CAMERA_FACING_FRONT;
+            mInfo.facing = hardware::CAMERA_FACING_FRONT;
             break;
         default:
             ALOGW("%s: Device %s: Unknown camera facing: %d",
                     __FUNCTION__, mId.c_str(), cInfo.facing);
-            info->facing = hardware::CAMERA_FACING_BACK;
+            mInfo.facing = hardware::CAMERA_FACING_BACK;
     }
-    info->orientation = cInfo.orientation;
+    mInfo.orientation = cInfo.orientation;
 
     return OK;
 }
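With cacheCameraInfo(), the facing/orientation query happens once during enumeration, while the HAL is already powered on for the flash-unit check; getCameraInfo() then serves the cached copy without reopening the device. A rough sketch of that query-once shape, with placeholder types and error codes:

    struct CameraInfo { int facing = 0; int orientation = 0; };  // stand-in for hardware::CameraInfo

    class CachedDeviceInfo {
      public:
        // Called once during enumeration, while the device interface is open.
        int cacheCameraInfo(const CameraInfo& fromHal) {
            mInfo = fromHal;
            mCached = true;
            return 0;  // ~ OK
        }

        // Later calls never touch the HAL; they just copy the cached value.
        int getCameraInfo(CameraInfo* info) const {
            if (info == nullptr) return -1;  // ~ BAD_VALUE
            *info = mInfo;
            return mCached ? 0 : -1;
        }

      private:
        CameraInfo mInfo;
        bool mCached = false;
    };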
 
-status_t CameraProviderManager::ProviderInfo::DeviceInfo1::dumpState(int fd) const {
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::dumpState(int fd) {
     native_handle_t* handle = native_handle_create(1,0);
     handle->data[0] = fd;
-    hardware::Return<Status> s = mInterface->dumpState(handle);
+    const sp<InterfaceT> interface = startDeviceInterface<InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    hardware::Return<Status> s = interface->dumpState(handle);
     native_handle_delete(handle);
     if (!s.isOk()) {
         return INVALID_OPERATION;
@@ -1191,15 +1400,15 @@
         const metadata_vendor_id_t tagId, const std::string &id,
         uint16_t minorVersion,
         const CameraResourceCost& resourceCost,
+        sp<ProviderInfo> parentProvider,
         const std::vector<std::string>& publicCameraIds,
         sp<InterfaceT> interface) :
         DeviceInfo(name, tagId, id, hardware::hidl_version{3, minorVersion},
-                   publicCameraIds, resourceCost),
-        mInterface(interface) {
+                   publicCameraIds, resourceCost, parentProvider) {
     // Get camera characteristics and initialize flash unit availability
     Status status;
     hardware::Return<void> ret;
-    ret = mInterface->getCameraCharacteristics([&status, this](Status s,
+    ret = interface->getCameraCharacteristics([&status, this](Status s,
                     device::V3_2::CameraMetadata metadata) {
                 status = s;
                 if (s == Status::OK) {
@@ -1218,13 +1427,13 @@
             });
     if (!ret.isOk()) {
         ALOGE("%s: Transaction error getting camera characteristics for device %s"
-                " to check for a flash unit: %s", __FUNCTION__, mId.c_str(),
+                " to check for a flash unit: %s", __FUNCTION__, id.c_str(),
                 ret.description().c_str());
         return;
     }
     if (status != Status::OK) {
         ALOGE("%s: Unable to get camera characteristics for device %s: %s (%d)",
-                __FUNCTION__, mId.c_str(), CameraProviderManager::statusToString(status), status);
+                __FUNCTION__, id.c_str(), CameraProviderManager::statusToString(status), status);
         return;
     }
     status_t res = fixupMonochromeTags();
@@ -1244,12 +1453,12 @@
 
     queryPhysicalCameraIds();
     // Get physical camera characteristics if applicable
-    auto castResult = device::V3_5::ICameraDevice::castFrom(mInterface);
+    auto castResult = device::V3_5::ICameraDevice::castFrom(interface);
     if (!castResult.isOk()) {
         ALOGV("%s: Unable to convert ICameraDevice instance to version 3.5", __FUNCTION__);
         return;
     }
-    sp<hardware::camera::device::V3_5::ICameraDevice> interface_3_5 = castResult;
+    sp<device::V3_5::ICameraDevice> interface_3_5 = castResult;
     if (interface_3_5 == nullptr) {
         ALOGE("%s: Converted ICameraDevice instance to nullptr", __FUNCTION__);
         return;
@@ -1283,7 +1492,7 @@
 
             if (!ret.isOk()) {
                 ALOGE("%s: Transaction error getting physical camera %s characteristics for %s: %s",
                         __FUNCTION__, id.c_str(), mId.c_str(), ret.description().c_str());

                 return;
             }
             if (status != Status::OK) {
@@ -1294,12 +1503,17 @@
             }
         }
     }
+
+    if (!kEnableLazyHal) {
+        // Save HAL reference indefinitely
+        mSavedInterface = interface;
+    }
 }
 
 CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
 
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
-    return DeviceInfo::setTorchMode(mInterface, enabled);
+    return setTorchModeForDevice<InterfaceT>(enabled);
 }
 
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
@@ -1350,10 +1564,14 @@
     return isBackwardCompatible;
 }
 
-status_t CameraProviderManager::ProviderInfo::DeviceInfo3::dumpState(int fd) const {
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::dumpState(int fd) {
     native_handle_t* handle = native_handle_create(1,0);
     handle->data[0] = fd;
-    auto ret = mInterface->dumpState(handle);
+    const sp<InterfaceT> interface = startDeviceInterface<InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    auto ret = interface->dumpState(handle);
     native_handle_delete(handle);
     if (!ret.isOk()) {
         return INVALID_OPERATION;
@@ -1381,6 +1599,49 @@
     return OK;
 }
 
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::isSessionConfigurationSupported(
+        const hardware::camera::device::V3_4::StreamConfiguration &configuration,
+        bool *status /*out*/) {
+
+    const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+            this->startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    auto castResult = device::V3_5::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_5::ICameraDevice> interface_3_5 = castResult;
+    if (interface_3_5 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    status_t res;
+    Status callStatus;
+    auto ret = interface_3_5->isStreamCombinationSupported(configuration,
+            [&callStatus, &status] (Status s, bool combStatus) {
+                callStatus = s;
+                *status = combStatus;
+            });
+    if (ret.isOk()) {
+        switch (callStatus) {
+            case Status::OK:
+                // Expected case, do nothing.
+                res = OK;
+                break;
+            case Status::METHOD_NOT_SUPPORTED:
+                res = INVALID_OPERATION;
+                break;
+            default:
+                ALOGE("%s: Session configuration query failed: %d", __FUNCTION__, callStatus);
+                res = UNKNOWN_ERROR;
+        }
+    } else {
+        ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+        res = UNKNOWN_ERROR;
+    }
+
+    return res;
+}
+
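isSessionConfigurationSupported() above folds three outcomes into one status: a binder/transport failure, a device that does not implement the v3.5 query (METHOD_NOT_SUPPORTED), and a normal answer delivered through the callback. A compact sketch of that result handling with generic types; the enum, callback type, and error values below are illustrative only.

    #include <cerrno>
    #include <functional>

    enum class CallStatus { OK, METHOD_NOT_SUPPORTED, INTERNAL_ERROR };

    // Hypothetical remote query: reports (status, supported) through a callback,
    // loosely mimicking ICameraDevice@3.5::isStreamCombinationSupported().
    // Returns false on a transport failure.
    using QueryFn = std::function<bool(const std::function<void(CallStatus, bool)>&)>;

    int querySupport(const QueryFn& query, bool* supported) {
        if (supported == nullptr) return -EINVAL;
        CallStatus callStatus = CallStatus::INTERNAL_ERROR;
        const bool transportOk = query([&callStatus, supported](CallStatus s, bool combStatus) {
            callStatus = s;
            *supported = combStatus;
        });
        if (!transportOk) return -EIO;                          // ~ UNKNOWN_ERROR on binder error
        switch (callStatus) {
            case CallStatus::OK:                   return 0;        // combination answered
            case CallStatus::METHOD_NOT_SUPPORTED: return -ENOSYS;  // ~ INVALID_OPERATION
            default:                               return -EIO;     // ~ UNKNOWN_ERROR
        }
    }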
 status_t CameraProviderManager::ProviderInfo::parseProviderName(const std::string& name,
         std::string *type, uint32_t *id) {
     // Format must be "<type>/<id>"
@@ -1623,7 +1884,7 @@
 
 
 status_t HidlVendorTagDescriptor::createDescriptorFromHidl(
-        const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+        const hardware::hidl_vec<common::V1_0::VendorTagSection>& vts,
         /*out*/
         sp<VendorTagDescriptor>& descriptor) {
 
@@ -1651,7 +1912,7 @@
 
     int idx = 0;
     for (size_t s = 0; s < vts.size(); s++) {
-        const hardware::camera::common::V1_0::VendorTagSection& section = vts[s];
+        const common::V1_0::VendorTagSection& section = vts[s];
         const char *sectionName = section.sectionName.c_str();
         if (sectionName == NULL) {
             ALOGE("%s: no section name defined for vendor tag section %zu.", __FUNCTION__, s);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index c506d35..0966743 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -18,6 +18,7 @@
 #define ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_H
 
 #include <vector>
+#include <unordered_map>
 #include <unordered_set>
 #include <string>
 #include <mutex>
@@ -28,6 +29,7 @@
 #include <utils/Errors.h>
 #include <android/hardware/camera/common/1.0/types.h>
 #include <android/hardware/camera/provider/2.4/ICameraProvider.h>
+#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
 //#include <android/hardware/camera/provider/2.4/ICameraProviderCallbacks.h>
 #include <android/hidl/manager/1.0/IServiceNotification.h>
 #include <camera/VendorTagDescriptor.h>
@@ -110,6 +112,14 @@
     };
 
     /**
+     * Represents the mode a camera device is currently in
+     */
+    enum class DeviceMode {
+        TORCH,
+        CAMERA
+    };
+
+    /**
      * Initialize the manager and give it a status listener; optionally accepts a service
      * interaction proxy.
      *
@@ -166,6 +176,13 @@
             CameraMetadata* characteristics) const;
 
     /**
+     * Check for device support of specific stream combination.
+     */
+    status_t isSessionConfigurationSupported(const std::string& id,
+            const hardware::camera::device::V3_4::StreamConfiguration &configuration,
+            bool *status /*out*/) const;
+
+    /**
      * Return the highest supported device interface version for this ID
      */
     status_t getHighestSupportedVersion(const std::string &id,
@@ -174,7 +191,7 @@
     /**
      * Check if a given camera device support setTorchMode API.
      */
-    bool supportSetTorchMode(const std::string &id);
+    bool supportSetTorchMode(const std::string &id) const;
 
     /**
      * Turn on or off the flashlight on a given camera device.
@@ -205,6 +222,17 @@
             sp<hardware::camera::device::V1_0::ICameraDevice> *session);
 
     /**
+     * Save the ICameraProvider while it is being used by a camera or torch client
+     */
+    void saveRef(DeviceMode usageType, const std::string &cameraId,
+            sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
+
+    /**
+     * Notify that the camera or torch is no longer being used by a camera or torch client
+     */
+    void removeRef(DeviceMode usageType, const std::string &cameraId);
+
+    /**
      * IServiceNotification::onRegistration
      * Invoked by the hardware service manager when a new camera provider is registered
      */
@@ -251,20 +279,43 @@
 
     static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
 
+    // Mapping from camera device IDs to camera providers. Keeps the ICameraProvider alive
+    // while the device with the given ID is in use as a camera.
+    std::unordered_map<std::string, sp<hardware::camera::provider::V2_4::ICameraProvider>>
+            mCameraProviderByCameraId;
+
+    // Mapping from camera device IDs to camera providers. Keeps the ICameraProvider alive
+    // while the device with the given ID is in use as a torch.
+    std::unordered_map<std::string, sp<hardware::camera::provider::V2_4::ICameraProvider>>
+            mTorchProviderByCameraId;
+
+    // Lock for accessing mCameraProviderByCameraId and mTorchProviderByCameraId
+    std::mutex mProviderInterfaceMapLock;
+
     struct ProviderInfo :
             virtual public hardware::camera::provider::V2_4::ICameraProviderCallback,
             virtual public hardware::hidl_death_recipient
     {
         const std::string mProviderName;
-        const sp<hardware::camera::provider::V2_4::ICameraProvider> mInterface;
         const metadata_vendor_id_t mProviderTagid;
+        sp<VendorTagDescriptor> mVendorTagDescriptor;
+        bool mSetTorchModeSupported;
+        bool mIsRemote;
+
+        // This pointer is used to keep a reference to the ICameraProvider that was last accessed.
+        wp<hardware::camera::provider::V2_4::ICameraProvider> mActiveInterface;
+
+        sp<hardware::camera::provider::V2_4::ICameraProvider> mSavedInterface;
 
         ProviderInfo(const std::string &providerName,
-                sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
                 CameraProviderManager *manager);
         ~ProviderInfo();
 
-        status_t initialize();
+        status_t initialize(sp<hardware::camera::provider::V2_4::ICameraProvider>& interface);
+
+        const sp<hardware::camera::provider::V2_4::ICameraProvider> startProviderInterface();
 
         const std::string& getType() const;
 
@@ -286,6 +337,11 @@
         // hidl_death_recipient interface - this locks the parent mInterfaceMutex
         virtual void serviceDied(uint64_t cookie, const wp<hidl::base::V1_0::IBase>& who) override;
 
+        /**
+         * Set up vendor tags for this provider
+         */
+        status_t setUpVendorTags();
+
         // Basic device information, common to all camera devices
         struct DeviceInfo {
             const std::string mName;  // Full instance name
@@ -294,16 +350,20 @@
             const metadata_vendor_id_t mProviderTagid;
             bool mIsLogicalCamera;
             std::vector<std::string> mPhysicalIds;
+            hardware::CameraInfo mInfo;
+            sp<IBase> mSavedInterface;
 
             const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
 
             hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
 
+            sp<ProviderInfo> mParentProvider;
+
             bool hasFlashUnit() const { return mHasFlashUnit; }
             virtual status_t setTorchMode(bool enabled) = 0;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
             virtual bool isAPI1Compatible() const = 0;
-            virtual status_t dumpState(int fd) const = 0;
+            virtual status_t dumpState(int fd) = 0;
             virtual status_t getCameraCharacteristics(CameraMetadata *characteristics) const {
                 (void) characteristics;
                 return INVALID_OPERATION;
@@ -315,14 +375,25 @@
                 return INVALID_OPERATION;
             }
 
+            virtual status_t isSessionConfigurationSupported(
+                    const hardware::camera::device::V3_4::StreamConfiguration &/*configuration*/,
+                    bool * /*status*/) {
+                return INVALID_OPERATION;
+            }
+
+            template<class InterfaceT>
+            sp<InterfaceT> startDeviceInterface();
+
             DeviceInfo(const std::string& name, const metadata_vendor_id_t tagId,
                     const std::string &id, const hardware::hidl_version& version,
                     const std::vector<std::string>& publicCameraIds,
-                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost) :
+                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    sp<ProviderInfo> parentProvider) :
                     mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
                     mIsLogicalCamera(false), mResourceCost(resourceCost),
                     mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
-                    mHasFlashUnit(false), mPublicCameraIds(publicCameraIds) {}
+                    mParentProvider(parentProvider), mHasFlashUnit(false),
+                    mPublicCameraIds(publicCameraIds) {}
             virtual ~DeviceInfo();
         protected:
             bool mHasFlashUnit;
@@ -330,6 +401,14 @@
 
             template<class InterfaceT>
             static status_t setTorchMode(InterfaceT& interface, bool enabled);
+
+            template<class InterfaceT>
+            status_t setTorchModeForDevice(bool enabled) {
+                // Don't save the ICameraProvider interface here because we assume that this was
+                // called from CameraProviderManager::setTorchMode(), which does save it.
+                const sp<InterfaceT> interface = startDeviceInterface<InterfaceT>();
+                return DeviceInfo::setTorchMode(interface, enabled);
+            }
         };
         std::vector<std::unique_ptr<DeviceInfo>> mDevices;
         std::unordered_set<std::string> mUniqueCameraIds;
@@ -345,40 +424,45 @@
         // HALv1-specific camera fields, including the actual device interface
         struct DeviceInfo1 : public DeviceInfo {
             typedef hardware::camera::device::V1_0::ICameraDevice InterfaceT;
-            const sp<InterfaceT> mInterface;
 
             virtual status_t setTorchMode(bool enabled) override;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
             //In case of Device1Info assume that we are always API1 compatible
             virtual bool isAPI1Compatible() const override { return true; }
-            virtual status_t dumpState(int fd) const override;
+            virtual status_t dumpState(int fd) override;
             DeviceInfo1(const std::string& name, const metadata_vendor_id_t tagId,
                     const std::string &id, uint16_t minorVersion,
                     const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    sp<ProviderInfo> parentProvider,
                     const std::vector<std::string>& publicCameraIds,
                     sp<InterfaceT> interface);
             virtual ~DeviceInfo1();
         private:
             CameraParameters2 mDefaultParameters;
+            status_t cacheCameraInfo(sp<InterfaceT> interface);
         };
 
         // HALv3-specific camera fields, including the actual device interface
         struct DeviceInfo3 : public DeviceInfo {
             typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
-            const sp<InterfaceT> mInterface;
 
             virtual status_t setTorchMode(bool enabled) override;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
             virtual bool isAPI1Compatible() const override;
-            virtual status_t dumpState(int fd) const override;
+            virtual status_t dumpState(int fd) override;
             virtual status_t getCameraCharacteristics(
                     CameraMetadata *characteristics) const override;
             virtual status_t getPhysicalCameraCharacteristics(const std::string& physicalCameraId,
                     CameraMetadata *characteristics) const override;
+            virtual status_t isSessionConfigurationSupported(
+                    const hardware::camera::device::V3_4::StreamConfiguration &configuration,
+                    bool *status /*out*/)
+                    override;
 
             DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
                     const std::string &id, uint16_t minorVersion,
                     const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    sp<ProviderInfo> parentProvider,
                     const std::vector<std::string>& publicCameraIds, sp<InterfaceT> interface);
             virtual ~DeviceInfo3();
         private:
@@ -405,11 +489,11 @@
         template<class DeviceInfoT>
         std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &name,
                 const metadata_vendor_id_t tagId, const std::string &id,
-                uint16_t minorVersion) const;
+                uint16_t minorVersion);
 
         // Helper for initializeDeviceInfo to use the right CameraProvider get method.
         template<class InterfaceT>
-        sp<InterfaceT> getDeviceInterface(const std::string &name) const;
+        sp<InterfaceT> startDeviceInterface(const std::string &name);
 
         // Parse provider instance name for type and id
         static status_t parseProviderName(const std::string& name,
@@ -443,6 +527,14 @@
 
     std::vector<sp<ProviderInfo>> mProviders;
 
+    void addProviderToMap(
+            const std::string &cameraId,
+            sp<hardware::camera::provider::V2_4::ICameraProvider> provider,
+            bool isTorchUsage);
+    void removeCameraIdFromMap(
+        std::unordered_map<std::string, sp<hardware::camera::provider::V2_4::ICameraProvider>> &map,
+        const std::string &cameraId);
+
     static const char* deviceStatusToString(
         const hardware::camera::common::V1_0::CameraDeviceStatus&);
     static const char* torchStatusToString(
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 419ac42..e5a38bb 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -194,6 +194,28 @@
      */
     status_t dropStreamBuffers(bool dropping, int streamId) override;
 
+    /**
+     * Helper functions to map between framework and HIDL values
+     */
+    static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
+    static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
+            android_dataspace dataSpace);
+    static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint64_t usage);
+    static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
+            camera3_stream_rotation_t rotation);
+    // Returns a negative error code if the passed-in operation mode is not valid.
+    static status_t mapToStreamConfigurationMode(camera3_stream_configuration_mode_t operationMode,
+            /*out*/ hardware::camera::device::V3_2::StreamConfigurationMode *mode);
+    static camera3_buffer_status_t mapHidlBufferStatus(
+            hardware::camera::device::V3_2::BufferStatus status);
+    static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
+    static android_dataspace mapToFrameworkDataspace(
+            hardware::camera::device::V3_2::DataspaceFlags);
+    static uint64_t mapConsumerToFrameworkUsage(
+            hardware::camera::device::V3_2::BufferUsageFlags usage);
+    static uint64_t mapProducerToFrameworkUsage(
+            hardware::camera::device::V3_2::BufferUsageFlags usage);
+
   private:
 
     status_t disconnectImpl();
@@ -679,27 +701,6 @@
      */
     static nsecs_t getMonoToBoottimeOffset();
 
-    /**
-     * Helper functions to map between framework and HIDL values
-     */
-    static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
-    static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
-            android_dataspace dataSpace);
-    static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint64_t usage);
-    static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
-            camera3_stream_rotation_t rotation);
-    // Returns a negative error code if the passed-in operation mode is not valid.
-    static status_t mapToStreamConfigurationMode(camera3_stream_configuration_mode_t operationMode,
-            /*out*/ hardware::camera::device::V3_2::StreamConfigurationMode *mode);
-    static camera3_buffer_status_t mapHidlBufferStatus(hardware::camera::device::V3_2::BufferStatus status);
-    static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
-    static android_dataspace mapToFrameworkDataspace(
-            hardware::camera::device::V3_2::DataspaceFlags);
-    static uint64_t mapConsumerToFrameworkUsage(
-            hardware::camera::device::V3_2::BufferUsageFlags usage);
-    static uint64_t mapProducerToFrameworkUsage(
-            hardware::camera::device::V3_2::BufferUsageFlags usage);
-
     struct RequestTrigger {
         // Metadata tag number, e.g. android.control.aePrecaptureTrigger
         uint32_t metadataTag;
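Moving the framework/HIDL mapping helpers into the public section lets code outside Camera3Device (for example, code assembling a StreamConfiguration for the new session query) reuse them. As a shape reference only, here is a tiny sketch of the contract the comment describes for mapToStreamConfigurationMode(): translate known modes, return a negative error code otherwise. The enum names and values below are placeholders, not the real camera3/HIDL definitions.

    #include <cstdint>

    enum FrameworkOpMode { FW_MODE_NORMAL = 0, FW_MODE_CONSTRAINED_HIGH_SPEED = 1 };  // placeholder
    enum class HidlOpMode : uint32_t { NORMAL = 0, CONSTRAINED_HIGH_SPEED = 1 };      // placeholder

    int mapOpMode(FrameworkOpMode in, HidlOpMode* out) {
        if (out == nullptr) return -22;  // ~ BAD_VALUE
        switch (in) {
            case FW_MODE_NORMAL:
                *out = HidlOpMode::NORMAL;
                return 0;
            case FW_MODE_CONSTRAINED_HIGH_SPEED:
                *out = HidlOpMode::CONSTRAINED_HIGH_SPEED;
                return 0;
            default:
                return -22;  // unknown operation mode
        }
    }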
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 219cc24..8cd575d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -663,12 +663,11 @@
     return res;
 }
 
-status_t Camera3OutputStream::getEndpointUsageForSurface(uint64_t *usage,
-        const sp<Surface>& surface) const {
-    status_t res;
-    uint64_t u = 0;
+void Camera3OutputStream::applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/) {
+    if (consumerUsage == nullptr) {
+        return;
+    }
 
-    res = native_window_get_consumer_usage(static_cast<ANativeWindow*>(surface.get()), &u);
     // If an opaque output stream's endpoint is ImageReader, add
     // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used
     // for the ZSL use case.
@@ -677,12 +676,20 @@
     //     2. GRALLOC_USAGE_HW_RENDER
     //     3. GRALLOC_USAGE_HW_COMPOSER
     //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
-    if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
-            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+            (*consumerUsage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
             GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
-        u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
+        *consumerUsage |= GRALLOC_USAGE_HW_CAMERA_ZSL;
     }
+}
 
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint64_t *usage,
+        const sp<Surface>& surface) const {
+    status_t res;
+    uint64_t u = 0;
+
+    res = native_window_get_consumer_usage(static_cast<ANativeWindow*>(surface.get()), &u);
+    applyZSLUsageQuirk(camera3_stream::format, &u);
     *usage = u;
     return res;
 }
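Factoring the ZSL logic into applyZSLUsageQuirk() makes the rule reusable on its own: an implementation-defined (opaque) stream whose consumer sets none of the GPU, compositor, or video-encoder usage bits is assumed to be an ImageReader, so the ZSL usage bit is added for the HAL. A standalone sketch of the same rule; the usage bit values below are placeholders for the real gralloc constants.

    #include <cstdint>

    // Placeholder bit values; the real ones come from the gralloc headers.
    constexpr uint64_t kUsageHwTexture      = 1ull << 0;
    constexpr uint64_t kUsageHwRender       = 1ull << 1;
    constexpr uint64_t kUsageHwComposer     = 1ull << 2;
    constexpr uint64_t kUsageHwVideoEncoder = 1ull << 3;
    constexpr uint64_t kUsageHwCameraZsl    = 1ull << 4;
    constexpr int kFormatImplementationDefined = 0x22;  // HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED

    void applyZslQuirk(int format, uint64_t* consumerUsage /*inout*/) {
        if (consumerUsage == nullptr) {
            return;
        }
        const uint64_t hwBits = kUsageHwTexture | kUsageHwRender |
                                kUsageHwComposer | kUsageHwVideoEncoder;
        if (format == kFormatImplementationDefined && (*consumerUsage & hwBits) == 0) {
            *consumerUsage |= kUsageHwCameraZsl;  // tell the HAL this is a ZSL output
        }
    }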
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 410905d..2128da2 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -201,6 +201,11 @@
             const std::vector<size_t> &removedSurfaceIds,
             KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
 
+    /**
+     * Apply ZSL related consumer usage quirk.
+     */
+    static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
+
   protected:
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index 76ed6f6..582ce34 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -101,12 +101,14 @@
 bool convertFromHidl(const HCameraMetadata &src, CameraMetadata *dst) {
     const camera_metadata_t *buffer = reinterpret_cast<const camera_metadata_t*>(src.data());
     size_t expectedSize = src.size();
-    int res = validate_camera_metadata_structure(buffer, &expectedSize);
-    if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
-        *dst = buffer;
-    } else {
-        ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
-        return false;
+    if (buffer != nullptr) {
+        int res = validate_camera_metadata_structure(buffer, &expectedSize);
+        if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
+            *dst = buffer;
+        } else {
+            ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+            return false;
+        }
     }
     return true;
 }
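The convertFromHidl() change treats an empty metadata payload as "no metadata" rather than an error, validating and assigning only non-empty buffers. A minimal sketch of that guard with placeholder types (the real check is validate_camera_metadata_structure(); here any non-empty buffer is accepted as a stand-in):

    #include <cstdint>
    #include <vector>

    struct ParsedMetadata { bool assigned = false; };  // placeholder for CameraMetadata

    bool convertMetadata(const std::vector<uint8_t>& src, ParsedMetadata* dst) {
        if (dst == nullptr) {
            return false;
        }
        if (!src.empty()) {
            // Only non-empty buffers are validated and assigned.
            dst->assigned = true;
        }
        return true;  // an empty input is valid and leaves *dst untouched
    }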
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index 8d80ff1..ad9963a 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -30,7 +30,8 @@
     android.hardware.camera.common@1.0 \
     android.hardware.camera.provider@2.4 \
     android.hardware.camera.device@1.0 \
-    android.hardware.camera.device@3.2
+    android.hardware.camera.device@3.2 \
+    android.hardware.camera.device@3.4
 
 LOCAL_C_INCLUDES += \
     system/media/private/camera/include \
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
new file mode 100644
index 0000000..653317b
--- /dev/null
+++ b/services/mediacodec/registrant/Android.bp
@@ -0,0 +1,53 @@
+cc_library_shared {
+    name: "libmedia_codecserviceregistrant",
+    srcs: [
+        "CodecServiceRegistrant.cpp",
+    ],
+
+    header_libs: [
+        "libmedia_headers",
+    ],
+
+    shared_libs: [
+        "android.hardware.media.c2@1.0",
+        "libbase",
+        "libcodec2_hidl@1.0",
+        "libcodec2_vndk",
+        "libutils",
+    ],
+
+    // Codecs
+    runtime_libs: [
+        "libcodec2_soft_avcdec",
+        "libcodec2_soft_avcenc",
+        "libcodec2_soft_aacdec",
+        "libcodec2_soft_aacenc",
+        "libcodec2_soft_amrnbdec",
+        "libcodec2_soft_amrnbenc",
+        "libcodec2_soft_amrwbdec",
+        "libcodec2_soft_amrwbenc",
+        "libcodec2_soft_hevcdec",
+        "libcodec2_soft_g711alawdec",
+        "libcodec2_soft_g711mlawdec",
+        "libcodec2_soft_mpeg2dec",
+        "libcodec2_soft_h263dec",
+        "libcodec2_soft_h263enc",
+        "libcodec2_soft_mpeg4dec",
+        "libcodec2_soft_mpeg4enc",
+        "libcodec2_soft_mp3dec",
+        "libcodec2_soft_vorbisdec",
+        "libcodec2_soft_opusdec",
+        "libcodec2_soft_vp8dec",
+        "libcodec2_soft_vp9dec",
+        "libcodec2_soft_vp8enc",
+        "libcodec2_soft_vp9enc",
+        "libcodec2_soft_rawdec",
+        "libcodec2_soft_flacdec",
+        "libcodec2_soft_flacenc",
+        "libcodec2_soft_gsmdec",
+        "libcodec2_soft_xaacdec",
+    ],
+
+    compile_multilib: "32",
+}
+
diff --git a/media/codec2/hidl/services/C2SoftwareCodecServiceRegistrant.cpp b/services/mediacodec/registrant/CodecServiceRegistrant.cpp
similarity index 74%
rename from media/codec2/hidl/services/C2SoftwareCodecServiceRegistrant.cpp
rename to services/mediacodec/registrant/CodecServiceRegistrant.cpp
index e10ae6e..706ebee 100644
--- a/media/codec2/hidl/services/C2SoftwareCodecServiceRegistrant.cpp
+++ b/services/mediacodec/registrant/CodecServiceRegistrant.cpp
@@ -15,26 +15,30 @@
  */
 
 //#define LOG_NDEBUG 0
-#define LOG_TAG "C2SoftwareCodecServiceRegistrant"
+#define LOG_TAG "CodecServiceRegistrant"
+
+#include <android-base/logging.h>
 
 #include <C2PlatformSupport.h>
 #include <codec2/hidl/1.0/ComponentStore.h>
 #include <media/CodecServiceRegistrant.h>
-#include <log/log.h>
 
 extern "C" void RegisterCodecServices() {
     using namespace ::android::hardware::media::c2::V1_0;
+    LOG(INFO) << "Creating software Codec2 service...";
     android::sp<IComponentStore> store =
         new utils::ComponentStore(
                 android::GetCodec2PlatformComponentStore());
     if (store == nullptr) {
-        ALOGE("Cannot create Codec2's IComponentStore software service.");
+        LOG(ERROR) <<
+                "Cannot create software Codec2 service.";
     } else {
         if (store->registerAsService("software") != android::OK) {
-            ALOGE("Cannot register Codec2's "
-                    "IComponentStore software service.");
+            LOG(ERROR) <<
+                    "Cannot register software Codec2 service.";
         } else {
-            ALOGI("Codec2's IComponentStore software service created.");
+            LOG(INFO) <<
+                    "Software Codec2 service created.";
         }
     }
 }
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 73c9535..7c9c727 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -9,6 +9,24 @@
 
 LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
 LOCAL_MODULE:= libmediaextractorservice
+
+sanitizer_runtime_libraries := $(call normalize-path-list,$(addsuffix .so,\
+  $(ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
+  $(UBSAN_RUNTIME_LIBRARY) \
+  $(TSAN_RUNTIME_LIBRARY)))
+
+# $(info Sanitizer:  $(sanitizer_runtime_libraries))
+
+ndk_libraries := $(call normalize-path-list,$(addprefix lib,$(addsuffix .so,\
+  $(NDK_PREBUILT_SHARED_LIBRARIES))))
+
+# $(info NDK:  $(ndk_libraries))
+
+LOCAL_CFLAGS += -DLINKED_LIBRARIES='"$(sanitizer_runtime_libraries):$(ndk_libraries)"'
+
+sanitizer_runtime_libraries :=
+ndk_libraries :=
+
 include $(BUILD_SHARED_LIBRARY)
 
 
@@ -22,16 +40,10 @@
 
 # extractor libraries
 LOCAL_REQUIRED_MODULES += \
-    libaacextractor \
-    libamrextractor \
-    libflacextractor \
-    libmidiextractor \
     libmkvextractor \
-    libmp3extractor \
     libmp4extractor \
     libmpeg2extractor \
     liboggextractor \
-    libwavextractor \
 
 LOCAL_SRC_FILES := main_extractorservice.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index f4d8b43..8b26178 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -29,6 +29,11 @@
 
 namespace android {
 
+MediaExtractorService::MediaExtractorService()
+        : BnMediaExtractorService() {
+    MediaExtractorFactory::SetLinkedLibraries(std::string(LINKED_LIBRARIES));
+}
+
 sp<IMediaExtractor> MediaExtractorService::makeExtractor(
         const sp<IDataSource> &remoteSource, const char *mime) {
     ALOGV("@@@ MediaExtractorService::makeExtractor for %s", mime);
diff --git a/services/mediaextractor/MediaExtractorService.h b/services/mediaextractor/MediaExtractorService.h
index 9df3ecd..6007004 100644
--- a/services/mediaextractor/MediaExtractorService.h
+++ b/services/mediaextractor/MediaExtractorService.h
@@ -27,7 +27,7 @@
 {
     friend class BinderService<MediaExtractorService>;    // for MediaExtractorService()
 public:
-    MediaExtractorService() : BnMediaExtractorService() { }
+    MediaExtractorService();
     virtual ~MediaExtractorService() { }
     virtual void onFirstRef() { }
 
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index b1854bf..cca1895 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -172,7 +172,7 @@
 
         aaudio_result_t result = endpoint->open(request);
         if (result != AAUDIO_OK) {
-            ALOGE("openExclusiveEndpoint(), open failed");
+            ALOGV("openExclusiveEndpoint(), open failed");
             endpoint.clear();
         } else {
             mExclusiveStreams.push_back(endpointMMap);
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 5ec997c..2fbaeb4 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -118,7 +118,7 @@
         }
     }
 
-    // If SHARED requested or if EXCLUSIVE failed.
+    // Try SHARED if SHARED requested or if EXCLUSIVE failed.
     if (sharingMode == AAUDIO_SHARING_MODE_SHARED) {
         serviceStream =  new AAudioServiceStreamShared(*this);
         result = serviceStream->open(request);
@@ -132,8 +132,7 @@
 
     if (result != AAUDIO_OK) {
         serviceStream.clear();
-        ALOGE("openStream(): failed, return %d = %s",
-              result, AAudio_convertResultToText(result));
+        ALOGW("openStream(): failed, return %d = %s", result, AAudio_convertResultToText(result));
         return result;
     } else {
         aaudio_handle_t handle = mStreamTracker.addStreamForHandle(serviceStream.get());
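The clarified comment describes the openStream() policy: attempt an EXCLUSIVE (MMAP) stream when the client asked for one, and quietly fall back to SHARED when that fails, which is presumably also why the exclusive-endpoint open failure was demoted to a verbose log earlier in this patch. A rough sketch of that fallback shape; the types below are placeholders, with the MMAP path hard-coded to fail for illustration.

    #include <memory>

    enum class SharingMode { EXCLUSIVE, SHARED };
    struct Stream { virtual int open() = 0; virtual ~Stream() = default; };
    struct MmapStream   : Stream { int open() override { return -1; } };  // pretend MMAP is unavailable
    struct SharedStream : Stream { int open() override { return 0; } };

    std::unique_ptr<Stream> openStream(SharingMode requested) {
        std::unique_ptr<Stream> stream;
        int result = -1;
        if (requested == SharingMode::EXCLUSIVE) {
            stream = std::make_unique<MmapStream>();
            result = stream->open();
            if (result != 0) {
                stream.reset();
                requested = SharingMode::SHARED;  // downgrade and retry
            }
        }
        // Try SHARED if SHARED was requested or if EXCLUSIVE failed.
        if (requested == SharingMode::SHARED) {
            stream = std::make_unique<SharedStream>();
            result = stream->open();
        }
        if (result != 0) stream.reset();
        return stream;
    }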
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 24ab65e..539735a 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -38,10 +38,6 @@
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
-AAudioServiceEndpoint::~AAudioServiceEndpoint() {
-    ALOGD("%s(%p) destroyed", __func__, this);
-}
-
 std::string AAudioServiceEndpoint::dump() const {
     std::stringstream result;
 
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index a134a13..43b0a37 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -41,7 +41,7 @@
         , public AAudioStreamParameters {
 public:
 
-    virtual ~AAudioServiceEndpoint();
+    virtual ~AAudioServiceEndpoint() = default;
 
     virtual std::string dump() const;
 
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 18fcd35..e4dbee1 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -101,8 +101,6 @@
             .flags = AUDIO_FLAG_LOW_LATENCY,
             .tags = ""
     };
-    ALOGD("%s(%p) MMAP attributes.usage = %d, content_type = %d, source = %d",
-          __func__, this, attributes.usage, attributes.content_type, attributes.source);
 
     mMmapClient.clientUid = request.getUserId();
     mMmapClient.clientPid = request.getProcessId();
@@ -163,12 +161,12 @@
     ALOGD("%s() mMapClient.uid = %d, pid = %d => portHandle = %d\n",
           __func__, mMmapClient.clientUid,  mMmapClient.clientPid, mPortHandle);
     if (status != OK) {
-        ALOGE("%s() openMmapStream() returned status %d",  __func__, status);
+        ALOGE("%s() - openMmapStream() returned status %d",  __func__, status);
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
     if (deviceId == AAUDIO_UNSPECIFIED) {
-        ALOGW("%s() openMmapStream() failed to set deviceId", __func__);
+        ALOGW("%s() - openMmapStream() failed to set deviceId", __func__);
     }
     setDeviceId(deviceId);
 
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index a274466..923a1a4 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -47,10 +47,6 @@
     mStreamInternal = &mStreamInternalPlay;
 }
 
-AAudioServiceEndpointPlay::~AAudioServiceEndpointPlay() {
-    ALOGD("%s(%p) destroyed", __func__, this);
-}
-
 aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
     aaudio_result_t result = AAudioServiceEndpointShared::open(request);
     if (result == AAUDIO_OK) {
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
index a0a383c..981e430 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.h
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -39,7 +39,6 @@
 class AAudioServiceEndpointPlay : public AAudioServiceEndpointShared {
 public:
     explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
-    virtual ~AAudioServiceEndpointPlay();
 
     aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
index d671710..bfc1744 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.h
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -52,8 +52,7 @@
 
     aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
 
-    virtual void            *callbackLoop() = 0;
-
+    virtual void   *callbackLoop() = 0;
 
 protected:
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 12be4a3..354b36a 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -51,7 +51,6 @@
 }
 
 AAudioServiceStreamBase::~AAudioServiceStreamBase() {
-    ALOGD("~AAudioServiceStreamBase() destroying %p", this);
     // If the stream is deleted when OPEN or in use then audio resources will leak.
     // This would indicate an internal error. So we want to find this ASAP.
     LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
@@ -110,7 +109,6 @@
         mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService,
                                                          request);
         if (mServiceEndpoint == nullptr) {
-            ALOGE("%s() openEndpoint() failed", __func__);
             result = AAUDIO_ERROR_UNAVAILABLE;
             goto error;
         }
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index dbc2c2e..d5450fe 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -128,7 +128,6 @@
 
     aaudio_result_t result = AAudioServiceStreamBase::open(request);
     if (result != AAUDIO_OK) {
-        ALOGE("%s() returned %d", __func__, result);
         return result;
     }