Merge "audiopolicy: fix AudioRecordingConfiguration callback"
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 09b85d5..691996b 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -287,3 +287,16 @@
     }
     return device->createCaptureSession(outputs, sessionParameters, callbacks, session);
 }
+
+EXPORT
+camera_status_t ACameraDevice_isSessionConfigurationSupported(
+        const ACameraDevice* device,
+        const ACaptureSessionOutputContainer* sessionOutputContainer) {
+    ATRACE_CALL();
+    if (device == nullptr || sessionOutputContainer == nullptr) {
+        ALOGE("%s: Error: invalid input: device %p, sessionOutputContainer %p",
+                __FUNCTION__, device, sessionOutputContainer);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return device->isSessionConfigurationSupported(sessionOutputContainer);
+}
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 5e4fcd0..c9db01e 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -227,6 +227,55 @@
     return ACAMERA_OK;
 }
 
+camera_status_t CameraDevice::isSessionConfigurationSupported(
+        const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+    Mutex::Autolock _l(mDeviceLock);
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        return ret;
+    }
+
+    SessionConfiguration sessionConfiguration(0 /*inputWidth*/, 0 /*inputHeight*/,
+            -1 /*inputFormat*/, CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
+    for (const auto& output : sessionOutputContainer->mOutputs) {
+        sp<IGraphicBufferProducer> iGBP(nullptr);
+        ret = getIGBPfromAnw(output.mWindow, iGBP);
+        if (ret != ACAMERA_OK) {
+            ALOGE("Camera device %s failed to extract graphic producer from native window",
+                    getId());
+            return ret;
+        }
+
+        String16 physicalId16(output.mPhysicalCameraId.c_str());
+        OutputConfiguration outConfig(iGBP, output.mRotation, physicalId16,
+                OutputConfiguration::INVALID_SET_ID, true);
+
+        for (auto& anw : output.mSharedWindows) {
+            ret = getIGBPfromAnw(anw, iGBP);
+            if (ret != ACAMERA_OK) {
+                ALOGE("Camera device %s failed to extract graphic producer from native window",
+                        getId());
+                return ret;
+            }
+            outConfig.addGraphicProducer(iGBP);
+        }
+
+        sessionConfiguration.addOutputConfiguration(outConfig);
+    }
+
+    bool supported = false;
+    binder::Status remoteRet = mRemote->isSessionConfigurationSupported(
+            sessionConfiguration, &supported);
+    if (remoteRet.serviceSpecificErrorCode() ==
+            hardware::ICameraService::ERROR_INVALID_OPERATION) {
+        return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
+    } else if (!remoteRet.isOk()) {
+        return ACAMERA_ERROR_UNKNOWN;
+    } else {
+        return supported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+    }
+}
+
 camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
     camera_status_t ret = checkCameraClosedOrErrorLocked();
     if (ret != ACAMERA_OK) {
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 103efd5..56741ce 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -35,6 +35,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <camera/CaptureResult.h>
 #include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/CaptureRequest.h>
 
 #include <camera/NdkCameraManager.h>
@@ -77,6 +78,9 @@
             const ACameraCaptureSession_stateCallbacks* callbacks,
             /*out*/ACameraCaptureSession** session);
 
+    camera_status_t isSessionConfigurationSupported(
+            const ACaptureSessionOutputContainer* sessionOutputContainer) const;
+
     // Callbacks from camera service
     class ServiceCallback : public hardware::camera2::BnCameraDeviceCallbacks {
       public:
@@ -369,6 +373,11 @@
         return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
     }
 
+    camera_status_t isSessionConfigurationSupported(
+            const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+        return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
+    }
+
     /***********************
      * Device interal APIs *
      ***********************/
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index cedf83a..bc544e3 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -845,6 +845,43 @@
         const ACameraIdList* physicalIdList,
         /*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
 
+/**
+ * Check whether a particular {@link ACaptureSessionOutputContainer} is supported by
+ * the camera device.
+ *
+ * <p>This method performs a runtime check of a given {@link
+ * ACaptureSessionOutputContainer}. The result confirms whether or not the
+ * passed CaptureSession outputs can be successfully used to create a camera
+ * capture session using {@link ACameraDevice_createCaptureSession}.</p>
+ *
+ * <p>This method can be called at any point before, during, and after an active
+ * capture session. It must not impact normal camera behavior in any way and
+ * must complete significantly faster than creating a capture session.</p>
+ *
+ * <p>Although this method is faster than creating a new capture session, it is not intended
+ * to be used for exploring the entire space of supported stream combinations.</p>
+ *
+ * @param device the camera device of interest
+ * @param sessionOutputContainer the {@link ACaptureSessionOutputContainer} of
+ *                               interest.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the given {@link ACaptureSessionOutputContainer}
+ *                                is supported by the camera device.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device or sessionOutputContainer
+ *                                                     is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_STREAM_CONFIGURE_FAIL} if the given
+ *                                                         {@link ACaptureSessionOutputContainer}
+ *                                                         is not supported by
+ *                                                         the camera
+ *                                                         device.</li>
+ *         <li>{@link ACAMERA_ERROR_UNSUPPORTED_OPERATION} if the query operation is not
+ *                                                         supported by the camera device.</li></ul>
+ */
+camera_status_t ACameraDevice_isSessionConfigurationSupported(
+        const ACameraDevice* device,
+        const ACaptureSessionOutputContainer* sessionOutputContainer) __INTRODUCED_IN(29);
+
 #endif /* __ANDROID_API__ >= 29 */
 
 __END_DECLS
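
The intended calling pattern mirrors what the vendor test below does: query the output
combination first and fall back to plain session creation when the device cannot answer
the query. A minimal sketch, assuming an already-opened `device`, a populated `outputs`
container, and `sessionCb`/`session` set up as for a normal capture session:

    camera_status_t status =
            ACameraDevice_isSessionConfigurationSupported(device, outputs);
    if (status == ACAMERA_OK || status == ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
        // Supported, or the device cannot answer the query: try to create the session.
        status = ACameraDevice_createCaptureSession(device, outputs, &sessionCb, &session);
    } else if (status == ACAMERA_ERROR_STREAM_CONFIGURE_FAIL) {
        // This particular output combination is not supported; pick another one.
    }
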
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index 6b58155..fc618ee 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -106,7 +106,8 @@
 
     /**
      * Camera device does not support the stream configuration provided by application in
-     * {@link ACameraDevice_createCaptureSession}.
+     * {@link ACameraDevice_createCaptureSession} or {@link
+     * ACameraDevice_isSessionConfigurationSupported}.
      */
     ACAMERA_ERROR_STREAM_CONFIGURE_FAIL = ACAMERA_ERROR_BASE - 9,
 
@@ -130,6 +131,11 @@
      * The application does not have permission to open camera.
      */
     ACAMERA_ERROR_PERMISSION_DENIED     = ACAMERA_ERROR_BASE - 13,
+
+    /**
+     * The operation is not supported by the camera device.
+     */
+    ACAMERA_ERROR_UNSUPPORTED_OPERATION = ACAMERA_ERROR_BASE - 14,
 } camera_status_t;
 
 #endif /* __ANDROID_API__ >= 24 */
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 946a98e..b6f1553 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -14,6 +14,7 @@
     ACameraDevice_createCaptureRequest_withPhysicalIds; # introduced=29
     ACameraDevice_createCaptureSession;
     ACameraDevice_createCaptureSessionWithSessionParameters; # introduced=28
+    ACameraDevice_isSessionConfigurationSupported; # introduced=29
     ACameraDevice_getId;
     ACameraManager_create;
     ACameraManager_delete;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index a38a31e..d7d774b 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -38,6 +38,7 @@
 
 using HCameraMetadata = frameworks::cameraservice::device::V2_0::CameraMetadata;
 using OutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
+using SessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
 using hardware::Void;
 
 // Static member definitions
@@ -216,6 +217,47 @@
     return ACAMERA_OK;
 }
 
+camera_status_t CameraDevice::isSessionConfigurationSupported(
+        const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+    Mutex::Autolock _l(mDeviceLock);
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        return ret;
+    }
+
+    SessionConfiguration sessionConfig;
+    sessionConfig.inputWidth = 0;
+    sessionConfig.inputHeight = 0;
+    sessionConfig.inputFormat = -1;
+    sessionConfig.operationMode = StreamConfigurationMode::NORMAL_MODE;
+    sessionConfig.outputStreams.resize(sessionOutputContainer->mOutputs.size());
+    size_t index = 0;
+    for (const auto& output : sessionOutputContainer->mOutputs) {
+        sessionConfig.outputStreams[index].rotation = utils::convertToHidl(output.mRotation);
+        sessionConfig.outputStreams[index].windowGroupId = -1;
+        sessionConfig.outputStreams[index].windowHandles.resize(output.mSharedWindows.size() + 1);
+        sessionConfig.outputStreams[index].windowHandles[0] = output.mWindow;
+        sessionConfig.outputStreams[index].physicalCameraId = output.mPhysicalCameraId;
+        index++;
+    }
+
+    bool configSupported = false;
+    Status status = Status::NO_ERROR;
+    auto remoteRet = mRemote->isSessionConfigurationSupported(sessionConfig,
+        [&status, &configSupported](auto s, auto supported) {
+            status = s;
+            configSupported = supported;
+        });
+
+    if (status == Status::INVALID_OPERATION) {
+        return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
+    } else if (!remoteRet.isOk()) {
+        return ACAMERA_ERROR_UNKNOWN;
+    } else {
+        return configSupported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+    }
+}
+
 void CameraDevice::addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest,
         sp<CaptureRequest> &req) {
     CameraMetadata metadataCopy = aCaptureRequest->settings->getInternalData();
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 28092fd..47e6f56 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -101,6 +101,9 @@
             const ACameraCaptureSession_stateCallbacks* callbacks,
             /*out*/ACameraCaptureSession** session);
 
+    camera_status_t isSessionConfigurationSupported(
+            const ACaptureSessionOutputContainer* sessionOutputContainer) const;
+
     // Callbacks from camera service
     class ServiceCallback : public ICameraDeviceCallback {
       public:
@@ -397,6 +400,11 @@
         return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
     }
 
+    camera_status_t isSessionConfigurationSupported(
+            const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+        return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
+    }
+
     /***********************
      * Device interal APIs *
      ***********************/
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 2398922..c51f93b 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -121,6 +121,12 @@
         cameraIdList.numCameras = idPointerList.size();
         cameraIdList.cameraIds = idPointerList.data();
 
+        ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
+        if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
+            ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
+            return ret;
+        }
+
         ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
         if (ret != AMEDIA_OK) {
             ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index 78a444b..f10835f 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -36,13 +36,18 @@
     ldflags: ["-Wl,-Bsymbolic"],
 }
 
+filegroup {
+    name: "codec2_soft_exports",
+    srcs: [ "exports.lds" ],
+}
+
 // public dependency for software codec implementation
 // to be used by code under media/codecs/* only as its stability is not guaranteed
 cc_defaults {
     name: "libcodec2_soft-defaults",
     defaults: ["libcodec2-impl-defaults"],
     vendor_available: true,
-
+    version_script: ":codec2_soft_exports",
     export_shared_lib_headers: [
         "libsfplugin_ccodec_utils",
     ],
diff --git a/media/codec2/components/base/exports.lds b/media/codec2/components/base/exports.lds
new file mode 100644
index 0000000..641bae8
--- /dev/null
+++ b/media/codec2/components/base/exports.lds
@@ -0,0 +1,7 @@
+{
+    global:
+        CreateCodec2Factory;
+        DestroyCodec2Factory;
+    local: *;
+};
+
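
The version script above hides every symbol except the two C2 factory entry points that
the codec2 loader looks up by name, so each soft codec library now exports only those.
A rough sketch of how a component defines them (C2SoftFooFactory is a hypothetical
placeholder for the component's own C2ComponentFactory subclass):

    #include <C2ComponentFactory.h>

    extern "C" ::C2ComponentFactory* CreateCodec2Factory() {
        return new ::android::C2SoftFooFactory();   // hypothetical factory type
    }

    extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
        delete factory;
    }
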
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 6da131f..d62944a 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -589,6 +589,21 @@
     bool mIsHdr10Plus;
 };
 
+struct Av1ProfileLevelMapper : ProfileLevelMapperHelper {
+    virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
+        return sAv1Levels.map(from, to);
+    }
+    virtual bool simpleMap(int32_t from, C2Config::level_t *to) {
+        return sAv1Levels.map(from, to);
+    }
+    virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
+        return sAv1Profiles.map(from, to);
+    }
+    virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
+        return sAv1Profiles.map(from, to);
+    }
+};
+
 } // namespace
 
 // static
@@ -613,6 +628,8 @@
         return std::make_shared<Vp8ProfileLevelMapper>();
     } else if (mediaType == MIMETYPE_VIDEO_VP9) {
         return std::make_shared<Vp9ProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_AV1) {
+        return std::make_shared<Av1ProfileLevelMapper>();
     }
     return nullptr;
 }
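
With the AV1 mapper registered, AV1 profile/level translation goes through the same
helper path as the other codecs. An illustrative call, assuming the usual
Codec2Mapper.h declarations (GetProfileLevelMapper and mapProfile as declared there;
treat the exact names as an assumption of this sketch):

    std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
            C2Mapper::GetProfileLevelMapper(MIMETYPE_VIDEO_AV1);
    int32_t sdkProfile = 0;
    if (mapper && mapper->mapProfile(C2Config::PROFILE_AV1_0, &sdkProfile)) {
        // sdkProfile now holds the SDK-side AV1 profile constant.
    }
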
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 20cc643..d6d24c1 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -1336,6 +1336,13 @@
     mReader = NULL;
 
     delete mDataSource;
+
+    for (size_t i = 0; i < mTracks.size(); ++i) {
+        TrackInfo *info = &mTracks.editItemAt(i);
+        if (info->mMeta) {
+            AMediaFormat_delete(info->mMeta);
+        }
+    }
 }
 
 size_t MatroskaExtractor::countTracks() {
@@ -1808,6 +1815,8 @@
 void MatroskaExtractor::addTracks() {
     const mkvparser::Tracks *tracks = mSegment->GetTracks();
 
+    AMediaFormat *meta = nullptr;
+
     for (size_t index = 0; index < tracks->GetTracksCount(); ++index) {
         const mkvparser::Track *track = tracks->GetTrackByIndex(index);
 
@@ -1832,7 +1841,11 @@
 
         enum { VIDEO_TRACK = 1, AUDIO_TRACK = 2 };
 
-        AMediaFormat *meta = AMediaFormat_new();
+        if (meta) {
+            AMediaFormat_clear(meta);
+        } else {
+            meta = AMediaFormat_new();
+        }
 
         status_t err = OK;
         int32_t nalSize = -1;
@@ -2067,21 +2080,26 @@
         long long durationNs = mSegment->GetDuration();
         AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_DURATION, (durationNs + 500) / 1000);
 
+        const char *mimetype = "";
+        if (!AMediaFormat_getString(meta, AMEDIAFORMAT_KEY_MIME, &mimetype)) {
+            // do not add this track to the track list
+            ALOGW("ignoring track with unknown mime");
+            continue;
+        }
+
         mTracks.push();
         size_t n = mTracks.size() - 1;
         TrackInfo *trackInfo = &mTracks.editItemAt(n);
         initTrackInfo(track, meta, trackInfo);
         trackInfo->mNalLengthSize = nalSize;
 
-        const char *mimetype = "";
-        AMediaFormat_getString(meta, AMEDIAFORMAT_KEY_MIME, &mimetype);
-
         if ((!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) ||
             (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_AVC) && isSetCsdFrom1stFrame)) {
             // Attempt to recover from AVC track without codec private data
             err = synthesizeAVCC(trackInfo, n);
             if (err != OK) {
                 mTracks.pop();
+                continue;
             }
         } else if ((!strcmp("V_MPEG2", codecID) && codecPrivateSize == 0) ||
             (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG2) && isSetCsdFrom1stFrame)) {
@@ -2089,6 +2107,7 @@
             err = synthesizeMPEG2(trackInfo, n);
             if (err != OK) {
                 mTracks.pop();
+                continue;
             }
         } else if ((!strcmp("V_MPEG4/ISO/ASP", codecID) && codecPrivateSize == 0) ||
             (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG4) && isSetCsdFrom1stFrame) ||
@@ -2099,9 +2118,14 @@
             err = synthesizeMPEG4(trackInfo, n);
             if (err != OK) {
                 mTracks.pop();
+                continue;
             }
         }
-
+        // the TrackInfo owns the metadata now
+        meta = nullptr;
+    }
+    if (meta) {
+        AMediaFormat_delete(meta);
     }
 }
 
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
index d53d9e3..99fad17 100644
--- a/media/extractors/mkv/MatroskaExtractor.h
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -61,10 +61,8 @@
         TrackInfo() {
             mMeta = NULL;
         }
+
         ~TrackInfo() {
-            if (mMeta) {
-                AMediaFormat_delete(mMeta);
-            }
         }
         unsigned long mTrackNum;
         bool mEncrypted;
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 22819cb..5ff1c59 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -3675,8 +3675,10 @@
 
     void *tmpData;
     size_t tmpDataSize;
+    const char *s;
     if (size >= 8 && metadataKey &&
-            !AMediaFormat_getBuffer(mFileMetaData, metadataKey, &tmpData, &tmpDataSize)) {
+            !AMediaFormat_getBuffer(mFileMetaData, metadataKey, &tmpData, &tmpDataSize) &&
+            !AMediaFormat_getString(mFileMetaData, metadataKey, &s)) {
         if (!strcmp(metadataKey, "albumart")) {
             AMediaFormat_setBuffer(mFileMetaData, metadataKey,
                     buffer + 8, size - 8);
@@ -3918,10 +3920,9 @@
         };
         static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
 
-        void *tmpData;
-        size_t tmpDataSize;
         for (size_t i = 0; i < kNumMapEntries; ++i) {
-            if (!AMediaFormat_getBuffer(mFileMetaData, kMap[i].key, &tmpData, &tmpDataSize)) {
+            const char *ss;
+            if (!AMediaFormat_getString(mFileMetaData, kMap[i].key, &ss)) {
                 ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
                 if (it->done()) {
                     delete it;
@@ -5318,7 +5319,9 @@
 }
 
 int32_t MPEG4Source::parseHEVCLayerId(const uint8_t *data, size_t size) {
-    CHECK(data != nullptr && size >= (mNALLengthSize + 2));
+    if (data == nullptr || size < mNALLengthSize + 2) {
+        return -1;
+    }
 
     // HEVC NAL-header (16-bit)
     //  1   6      6     3
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index d99493d..b63ae6b 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1280,7 +1280,7 @@
         //ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
     }
 
-    AMediaFormat_getInt32(mFileMeta, "haptic", &mHapticChannelCount);
+    AMediaFormat_getInt32(mFileMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, &mHapticChannelCount);
 }
 
 void MyOggExtractor::setChannelMask(int channelCount) {
@@ -1297,6 +1297,8 @@
             const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
                     audioChannelCount) | hapticChannelMask;
             AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
+            AMediaFormat_setInt32(
+                    mMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, mHapticChannelCount);
         }
     } else {
         AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK,
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 5851533..f07be46 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -410,8 +410,8 @@
     }
 
     // Call these directly because we are already holding the lock.
-    mAudioRecord->setMicrophoneDirection(mSelectedMicDirection);
-    mAudioRecord->setMicrophoneFieldDimension(mSelectedMicFieldDimension);
+    mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
+    mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);
 
     if (status != NO_ERROR) {
         mActive = false;
@@ -1381,7 +1381,7 @@
     return mAudioRecord->getActiveMicrophones(activeMicrophones).transactionError();
 }
 
-status_t AudioRecord::setMicrophoneDirection(audio_microphone_direction_t direction)
+status_t AudioRecord::setPreferredMicrophoneDirection(audio_microphone_direction_t direction)
 {
     AutoMutex lock(mLock);
     if (mSelectedMicDirection == direction) {
@@ -1394,11 +1394,11 @@
         // the internal AudioRecord hasn't be created yet, so just stash the attribute.
         return OK;
     } else {
-        return mAudioRecord->setMicrophoneDirection(direction).transactionError();
+        return mAudioRecord->setPreferredMicrophoneDirection(direction).transactionError();
     }
 }
 
-status_t AudioRecord::setMicrophoneFieldDimension(float zoom) {
+status_t AudioRecord::setPreferredMicrophoneFieldDimension(float zoom) {
     AutoMutex lock(mLock);
     if (mSelectedMicFieldDimension == zoom) {
         // NOP
@@ -1410,7 +1410,7 @@
         // the internal AudioRecord hasn't be created yet, so just stash the attribute.
         return OK;
     } else {
-        return mAudioRecord->setMicrophoneFieldDimension(zoom).transactionError();
+        return mAudioRecord->setPreferredMicrophoneFieldDimension(zoom).transactionError();
     }
 }
 
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index cf9c7f4..ecf58b6 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -39,9 +39,9 @@
 
   /* Set the microphone direction (for processing).
    */
-  void setMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
+  void setPreferredMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
 
   /* Set the microphone zoom (for processing).
    */
-  void setMicrophoneFieldDimension(float zoom);
+  void setPreferredMicrophoneFieldDimension(float zoom);
 }
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index b4ddb69..9c81bb7 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -542,11 +542,11 @@
 
     /* Set the Microphone direction (for processing purposes).
      */
-            status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
+            status_t    setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
 
     /* Set the Microphone zoom factor (for processing purposes).
      */
-            status_t    setMicrophoneFieldDimension(float zoom);
+            status_t    setPreferredMicrophoneFieldDimension(float zoom);
 
      /* Get the unique port ID assigned to this AudioRecord instance by audio policy manager.
       * The ID is unique across all audioserver clients and can change during the life cycle
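
The renamed controls are called exactly like the old ones; only the "preferred"
qualifier in the name changes. Sketch, assuming an existing, configured
android::AudioRecord held in `record` (MIC_DIRECTION_FRONT comes from system/audio.h):

    record->setPreferredMicrophoneDirection(MIC_DIRECTION_FRONT);
    record->setPreferredMicrophoneFieldDimension(1.0f);  // field dimension ("zoom") factor
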
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index e396cf3..6c8e6a4 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -855,25 +855,26 @@
 #endif
 
 #if MAJOR_VERSION < 5
-status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+status_t StreamInHalHidl::setPreferredMicrophoneDirection(
+            audio_microphone_direction_t direction __unused) {
     if (mStream == 0) return NO_INIT;
     return INVALID_OPERATION;
 }
 
-status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom __unused) {
+status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom __unused) {
     if (mStream == 0) return NO_INIT;
     return INVALID_OPERATION;
 }
 #else
-status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction) {
+status_t StreamInHalHidl::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
     if (!mStream) return NO_INIT;
-    return processReturn("setMicrophoneDirection",
-                mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
+    return processReturn("setPreferredMicrophoneDirection",
+        mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
 }
 
-status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom) {
+status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom) {
     if (!mStream) return NO_INIT;
-    return processReturn("setMicrophoneFieldDimension",
+    return processReturn("setPreferredMicrophoneFieldDimension",
                 mStream->setMicrophoneFieldDimension(zoom));
 }
 #endif
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 9ac1067..f587889 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -221,10 +221,11 @@
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
     // Set microphone direction (for processing)
-    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) override;
+    virtual status_t setPreferredMicrophoneDirection(
+                            audio_microphone_direction_t direction) override;
 
     // Set microphone zoom (for processing)
-    virtual status_t setMicrophoneFieldDimension(float zoom) override;
+    virtual status_t setPreferredMicrophoneFieldDimension(float zoom) override;
 
     // Called when the metadata of the stream's sink has been changed.
     status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index fcb809b..7d5ce05 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -369,20 +369,21 @@
 #endif
 
 #if MAJOR_VERSION < 5
-status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+status_t StreamInHalLocal::setPreferredMicrophoneDirection(
+            audio_microphone_direction_t direction __unused) {
     return INVALID_OPERATION;
 }
 
-status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom __unused) {
+status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom __unused) {
     return INVALID_OPERATION;
 }
 #else
-status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction) {
+status_t StreamInHalLocal::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
     if (mStream->set_microphone_direction == NULL) return INVALID_OPERATION;
     return mStream->set_microphone_direction(mStream, direction);
 }
 
-status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom) {
+status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom) {
     if (mStream->set_microphone_field_dimension == NULL) return INVALID_OPERATION;
     return mStream->set_microphone_field_dimension(mStream, zoom);
 
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index 3d6c50e..34f2bd8 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -205,10 +205,10 @@
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
     // Sets microphone direction (for processing)
-    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+    virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
 
     // Sets microphone zoom (for processing)
-    virtual status_t setMicrophoneFieldDimension(float zoom);
+    virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
 
     // Called when the metadata of the stream's sink has been changed.
     status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index ed8282f..6c3b21c 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -180,10 +180,10 @@
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
 
     // Set direction for capture processing
-    virtual status_t setMicrophoneDirection(audio_microphone_direction_t) = 0;
+    virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t) = 0;
 
     // Set zoom factor for capture stream
-    virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+    virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
 
     struct SinkMetadata {
         std::vector<record_track_metadata_t> tracks;
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 7a32d3f..d150f18 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -132,6 +132,9 @@
     shared_libs: [
         "liblog",
     ],
+    header_libs: [
+        "libhardware_headers"
+    ],
     cflags: [
         "-fvisibility=hidden",
         "-DBUILD_FLOAT",
diff --git a/media/libeffects/lvm/lib/Bundle/lib/LVM.h b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
index 83ecae1..5082a53 100644
--- a/media/libeffects/lvm/lib/Bundle/lib/LVM.h
+++ b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
@@ -298,6 +298,7 @@
     LVM_PSA_DecaySpeed_en       PSA_PeakDecayRate;      /* Peak value decay rate*/
 #ifdef SUPPORT_MC
     LVM_INT32                   NrChannels;
+    LVM_INT32                   ChMask;
 #endif
 
 } LVM_ControlParams_t;
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
index 62b4c73..1d95342 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
@@ -93,6 +93,7 @@
 
 #ifdef SUPPORT_MC
     pInstance->Params.NrChannels = pParams->NrChannels;
+    pInstance->Params.ChMask     = pParams->ChMask;
 #endif
     /*
      * Cinema Sound parameters
@@ -584,6 +585,7 @@
 
 #ifdef SUPPORT_MC
     pInstance->NrChannels = LocalParams.NrChannels;
+    pInstance->ChMask = LocalParams.ChMask;
 #endif
 
     /* Clear all internal data if format change*/
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
index 19d1532..cdd3134 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
@@ -291,6 +291,7 @@
 
 #ifdef SUPPORT_MC
     LVM_INT16              NrChannels;
+    LVM_INT32              ChMask;
 #endif
 
 } LVM_Instance_t;
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
index 94ba278..8d30a61 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
@@ -21,6 +21,7 @@
 /*  Includes                                                                            */
 /*                                                                                      */
 /****************************************************************************************/
+#include <system/audio.h>
 
 #include "LVM_Private.h"
 #include "VectorArithmetic.h"
@@ -67,6 +68,7 @@
     LVM_ReturnStatus_en  Status;
 #ifdef SUPPORT_MC
     LVM_INT32           NrChannels  = pInstance->NrChannels;
+    LVM_INT32           ChMask      = pInstance->ChMask;
 #define NrFrames SampleCount  // alias for clarity
 #endif
 
@@ -119,6 +121,7 @@
 #ifdef SUPPORT_MC
         /* Update the local variable NrChannels from pInstance->NrChannels value */
         NrChannels = pInstance->NrChannels;
+        ChMask     = pInstance->ChMask;
 #endif
 
         if(Status != LVM_SUCCESS)
@@ -140,6 +143,7 @@
         pToProcess = pOutData;
 #ifdef SUPPORT_MC
         NrChannels = 2;
+        ChMask     = AUDIO_CHANNEL_OUT_STEREO;
 #endif
     }
 
@@ -254,18 +258,24 @@
 
             }
 #ifdef SUPPORT_MC
-            /* TODO - Multichannel support to be added */
-            if (NrChannels == 2)
+            /*
+             * Volume balance
+             */
+            LVC_MixSoft_1St_MC_float_SAT(&pInstance->VC_BalanceMix,
+                                          pProcessed,
+                                          pProcessed,
+                                          NrFrames,
+                                          NrChannels,
+                                          ChMask);
+#else
+            /*
+             * Volume balance
+             */
+            LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
+                                          pProcessed,
+                                          pProcessed,
+                                          SampleCount);
 #endif
-            {
-                /*
-                 * Volume balance
-                 */
-                LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
-                                              pProcessed,
-                                              pProcessed,
-                                              SampleCount);
-            }
 
             /*
              * Perform Parametric Spectum Analysis
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c
index eb5755e..db76cd1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c
@@ -59,6 +59,31 @@
 
 
 }
+#ifdef SUPPORT_MC
+void LVC_Core_MixHard_1St_MC_float_SAT (Mix_Private_FLOAT_st **ptrInstance,
+                                         const LVM_FLOAT      *src,
+                                         LVM_FLOAT            *dst,
+                                         LVM_INT16            NrFrames,
+                                         LVM_INT16            NrChannels)
+{
+    LVM_FLOAT  Temp;
+    LVM_INT16 ii, jj;
+    for (ii = NrFrames; ii != 0; ii--)
+    {
+        for (jj = 0; jj < NrChannels; jj++)
+        {
+            Mix_Private_FLOAT_st  *pInstance1 = (Mix_Private_FLOAT_st *)(ptrInstance[jj]);
+            Temp = ((LVM_FLOAT)*(src++) * (LVM_FLOAT)pInstance1->Current);
+            if (Temp > 1.0f)
+                *dst++ = 1.0f;
+            else if (Temp < -1.0f)
+                *dst++ = -1.0f;
+            else
+                *dst++ = (LVM_FLOAT)Temp;
+        }
+    }
+}
+#endif
 #else
 void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_st        *ptrInstance1,
                                          LVMixer3_st        *ptrInstance2,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c
index 656a117..56b5dae 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c
@@ -146,6 +146,51 @@
     pInstanceR->Current = CurrentR;
 
 }
+#ifdef SUPPORT_MC
+void LVC_Core_MixSoft_1St_MC_float_WRA (Mix_Private_FLOAT_st **ptrInstance,
+                                         const LVM_FLOAT      *src,
+                                         LVM_FLOAT            *dst,
+                                         LVM_INT16            NrFrames,
+                                         LVM_INT16            NrChannels)
+{
+    LVM_INT32   ii, ch;
+    LVM_FLOAT   Temp = 0.0f;
+    LVM_FLOAT   tempCurrent[NrChannels];
+    for (ch = 0; ch < NrChannels; ch++)
+    {
+        tempCurrent[ch] = ptrInstance[ch]->Current;
+    }
+    for (ii = NrFrames; ii > 0; ii--)
+    {
+        for (ch = 0; ch < NrChannels; ch++)
+        {
+            Mix_Private_FLOAT_st *pInstance = ptrInstance[ch];
+            const LVM_FLOAT   Delta = pInstance->Delta;
+            LVM_FLOAT         Current = tempCurrent[ch];
+            const LVM_FLOAT   Target = pInstance->Target;
+            if (Current < Target)
+            {
+                ADD2_SAT_FLOAT(Current, Delta, Temp);
+                Current = Temp;
+                if (Current > Target)
+                    Current = Target;
+            }
+            else
+            {
+                Current -= Delta;
+                if (Current < Target)
+                    Current = Target;
+            }
+            *dst++ = *src++ * Current;
+            tempCurrent[ch] = Current;
+        }
+    }
+    for (ch = 0; ch < NrChannels; ch++)
+    {
+        ptrInstance[ch]->Current = tempCurrent[ch];
+    }
+}
+#endif
 #else
 void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_st        *ptrInstance1,
                                          LVMixer3_st        *ptrInstance2,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c
index bd5a925..a4682d3 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c
@@ -19,6 +19,8 @@
    INCLUDE FILES
 ***********************************************************************************/
 
+#include <system/audio.h>
+
 #include "LVC_Mixer_Private.h"
 #include "VectorArithmetic.h"
 #include "ScalarArithmetic.h"
@@ -30,10 +32,207 @@
 #define TRUE          1
 #define FALSE         0
 
+#define ARRAY_SIZE(a) ((sizeof(a)) / (sizeof(*(a))))
+
 /**********************************************************************************
    FUNCTION LVC_MixSoft_1St_2i_D16C31_SAT
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+/* This threshold is used to decide on the processing to be applied on
+ * front center and back center channels
+ */
+#define LVM_VOL_BAL_THR (0.000016f)
+void LVC_MixSoft_1St_MC_float_SAT (LVMixer3_2St_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT       *src,
+                                    LVM_FLOAT             *dst,
+                                    LVM_INT16             NrFrames,
+                                    LVM_INT32             NrChannels,
+                                    LVM_INT32             ChMask)
+{
+    char        HardMixing = TRUE;
+    LVM_FLOAT   TargetGain;
+    Mix_Private_FLOAT_st  Target_lfe = {LVM_MAXFLOAT, LVM_MAXFLOAT, LVM_MAXFLOAT};
+    Mix_Private_FLOAT_st  Target_ctr = {LVM_MAXFLOAT, LVM_MAXFLOAT, LVM_MAXFLOAT};
+    Mix_Private_FLOAT_st  *pInstance1 = \
+                              (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+    Mix_Private_FLOAT_st  *pInstance2 = \
+                              (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[1].PrivateParams);
+    Mix_Private_FLOAT_st  *pMixPrivInst[4] = {pInstance1, pInstance2, &Target_ctr, &Target_lfe};
+    Mix_Private_FLOAT_st  *pInstance[NrChannels];
+
+    if (audio_channel_mask_get_representation(ChMask)
+            == AUDIO_CHANNEL_REPRESENTATION_INDEX)
+    {
+        for (int i = 0; i < 2; i++)
+        {
+            pInstance[i] = pMixPrivInst[i];
+        }
+        for (int i = 2; i < NrChannels; i++)
+        {
+            pInstance[i] = pMixPrivInst[2];
+        }
+    }
+    else
+    {
+        // TODO: Combine with system/media/audio_utils/Balance.cpp
+        // Constants in system/media/audio/include/system/audio-base.h
+        // 'mixInstIdx' is used to map the appropriate mixer instance for each channel.
+        const int mixInstIdx[] = {
+            0, // AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
+            1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
+            2, // AUDIO_CHANNEL_OUT_FRONT_CENTER          = 0x4u,
+            3, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY         = 0x8u,
+            0, // AUDIO_CHANNEL_OUT_BACK_LEFT             = 0x10u,
+            1, // AUDIO_CHANNEL_OUT_BACK_RIGHT            = 0x20u,
+            0, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER  = 0x40u,
+            1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+            2, // AUDIO_CHANNEL_OUT_BACK_CENTER           = 0x100u,
+            0, // AUDIO_CHANNEL_OUT_SIDE_LEFT             = 0x200u,
+            1, // AUDIO_CHANNEL_OUT_SIDE_RIGHT            = 0x400u,
+            2, // AUDIO_CHANNEL_OUT_TOP_CENTER            = 0x800u,
+            0, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT        = 0x1000u,
+            2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER      = 0x2000u,
+            1, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT       = 0x4000u,
+            0, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT         = 0x8000u,
+            2, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER       = 0x10000u,
+            1, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
+            0, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
+            1, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u
+        };
+        if (pInstance1->Target <= LVM_VOL_BAL_THR ||
+            pInstance2->Target <= LVM_VOL_BAL_THR)
+        {
+            Target_ctr.Target  = 0.0f;
+            Target_ctr.Current = 0.0f;
+            Target_ctr.Delta   = 0.0f;
+        }
+        const unsigned int idxArrSize = ARRAY_SIZE(mixInstIdx);
+        for (unsigned int i = 0, channel = ChMask; channel != 0; ++i)
+        {
+            const unsigned int idx = __builtin_ctz(channel);
+            if (idx < idxArrSize)
+            {
+                pInstance[i] = pMixPrivInst[mixInstIdx[idx]];
+            }
+            else
+            {
+                pInstance[i] = pMixPrivInst[2];
+            }
+            channel &= ~(1 << idx);
+        }
+    }
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+
+    if ((pInstance1->Current != pInstance1->Target) ||
+        (pInstance2->Current != pInstance2->Target))
+    {
+        // TODO: combine similar checks below.
+        if (pInstance1->Delta == LVM_MAXFLOAT
+                || Abs_Float(pInstance1->Current - pInstance1->Target) < pInstance1->Delta)
+        {
+            /* Difference is not significant anymore. Make them equal. */
+            pInstance1->Current = pInstance1->Target;
+            TargetGain = pInstance1->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }
+        else
+        {
+            /* Soft mixing has to be applied */
+            HardMixing = FALSE;
+        }
+
+        if (HardMixing == TRUE)
+        {
+            if (pInstance2->Delta == LVM_MAXFLOAT
+                    || Abs_Float(pInstance2->Current - pInstance2->Target) < pInstance2->Delta)
+            {
+                /* Difference is not significant anymore. Make them equal. */
+                pInstance2->Current = pInstance2->Target;
+                TargetGain = pInstance2->Target;
+                LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[1]), TargetGain);
+            }
+            else
+            {
+                /* Soft mixing has to be applied */
+                HardMixing = FALSE;
+            }
+        }
+
+        if (HardMixing == FALSE)
+        {
+             LVC_Core_MixSoft_1St_MC_float_WRA (&pInstance[0],
+                                                 src, dst, NrFrames, NrChannels);
+        }
+    }
+
+    /******************************************************************************
+       HARD MIXING
+    *******************************************************************************/
+
+    if (HardMixing == TRUE)
+    {
+        if ((pInstance1->Target == LVM_MAXFLOAT) && (pInstance2->Target == LVM_MAXFLOAT))
+        {
+            if (src != dst)
+            {
+                Copy_Float(src, dst, NrFrames*NrChannels);
+            }
+        }
+        else
+        {
+            LVC_Core_MixHard_1St_MC_float_SAT(&(pInstance[0]),
+                                               src, dst, NrFrames, NrChannels);
+        }
+    }
+
+    /******************************************************************************
+       CALL BACK
+    *******************************************************************************/
+
+    if (ptrInstance->MixerStream[0].CallbackSet)
+    {
+        if (Abs_Float(pInstance1->Current - pInstance1->Target) < pInstance1->Delta)
+        {
+            pInstance1->Current = pInstance1->Target; /* Difference is not significant anymore. \
+                                                         Make them equal. */
+            TargetGain = pInstance1->Target;
+            LVC_Mixer_SetTarget(&ptrInstance->MixerStream[0], TargetGain);
+            ptrInstance->MixerStream[0].CallbackSet = FALSE;
+            if (ptrInstance->MixerStream[0].pCallBack != 0)
+            {
+                (*ptrInstance->MixerStream[0].pCallBack) (\
+                    ptrInstance->MixerStream[0].pCallbackHandle,
+                    ptrInstance->MixerStream[0].pGeneralPurpose,
+                    ptrInstance->MixerStream[0].CallbackParam);
+            }
+        }
+    }
+    if (ptrInstance->MixerStream[1].CallbackSet)
+    {
+        if (Abs_Float(pInstance2->Current - pInstance2->Target) < pInstance2->Delta)
+        {
+            pInstance2->Current = pInstance2->Target; /* Difference is not significant anymore.
+                                                         Make them equal. */
+            TargetGain = pInstance2->Target;
+            LVC_Mixer_SetTarget(&ptrInstance->MixerStream[1], TargetGain);
+            ptrInstance->MixerStream[1].CallbackSet = FALSE;
+            if (ptrInstance->MixerStream[1].pCallBack != 0)
+            {
+                (*ptrInstance->MixerStream[1].pCallBack) (\
+                    ptrInstance->MixerStream[1].pCallbackHandle,
+                    ptrInstance->MixerStream[1].pGeneralPurpose,
+                    ptrInstance->MixerStream[1].CallbackParam);
+            }
+        }
+    }
+}
+#endif
 void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_FLOAT_st *ptrInstance,
                                     const LVM_FLOAT             *src,
                                     LVM_FLOAT             *dst,
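
The per-channel instance selection above walks the positional channel mask one set bit
at a time with __builtin_ctz. The same walk in isolation (standalone C; 0x3F stands in
for a 5.1 positional mask, and the instance indices follow the mixInstIdx convention:
0 = left-like, 1 = right-like, 2 = center-like, 3 = LFE):

    #include <stdio.h>

    int main(void) {
        const int mixInstIdx[] = { 0, 1, 2, 3, 0, 1 };    // first six positional bits
        unsigned channel = 0x3F;                          // e.g. a 5.1 layout
        for (unsigned i = 0; channel != 0; ++i) {
            const unsigned bit = __builtin_ctz(channel);  // index of lowest set bit
            printf("output slot %u -> mixer instance %d\n", i, mixInstIdx[bit]);
            channel &= ~(1u << bit);                      // clear it, move to the next
        }
        return 0;
    }
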
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
index 7f18747..199d529 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
@@ -224,6 +224,14 @@
 /* Gain values should not be more that 1.0                                        */
 /**********************************************************************************/
 #ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void LVC_MixSoft_1St_MC_float_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                   const   LVM_FLOAT     *src,
+                                   LVM_FLOAT             *dst,   /* dst can be equal to src */
+                                   LVM_INT16             NrFrames,
+                                   LVM_INT32             NrChannels,
+                                   LVM_INT32             ChMask);
+#endif
 void LVC_MixSoft_1St_2i_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
                                    const   LVM_FLOAT     *src,
                                    LVM_FLOAT             *dst,   /* dst can be equal to src */
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
index f10094b..453a6a5 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
@@ -116,6 +116,13 @@
 /* Gain values should not be more that 1.0                                        */
 /**********************************************************************************/
 #ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void LVC_Core_MixSoft_1St_MC_float_WRA(Mix_Private_FLOAT_st **ptrInstance,
+                                         const LVM_FLOAT      *src,
+                                         LVM_FLOAT            *dst,
+                                         LVM_INT16            NrFrames,
+                                         LVM_INT16            NrChannels);
+#endif
 void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_FLOAT_st        *ptrInstance1,
                                          LVMixer3_FLOAT_st        *ptrInstance2,
                                          const LVM_FLOAT    *src,
@@ -136,6 +143,13 @@
 /* Gain values should not be more that 1.0                                        */
 /**********************************************************************************/
 #ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void LVC_Core_MixHard_1St_MC_float_SAT(Mix_Private_FLOAT_st **ptrInstance,
+                                         const LVM_FLOAT      *src,
+                                         LVM_FLOAT            *dst,
+                                         LVM_INT16            NrFrames,
+                                         LVM_INT16            NrChannels);
+#endif
 void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_FLOAT_st        *ptrInstance1,
                                          LVMixer3_FLOAT_st        *ptrInstance2,
                                          const LVM_FLOAT    *src,
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 1a874a3..5079634 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -36,6 +36,10 @@
     "-csE -tE"
     "-csE -eqE" "-tE -eqE"
     "-csE -tE -bE -M -eqE"
+    "-tE -eqE -vcBal:96 -M"
+    "-tE -eqE -vcBal:-96 -M"
+    "-tE -eqE -vcBal:0 -M"
+    "-tE -eqE -bE -vcBal:30 -M"
 )
 
 fs_arr=(
@@ -56,26 +60,41 @@
 
 # run multichannel effects at different configs, saving only the stereo channel
 # pair.
+error_count=0
 for flags in "${flags_arr[@]}"
 do
     for fs in ${fs_arr[*]}
     do
-        for ch in {1..8}
+        for chMask in {0..22}
         do
             adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
-                -o:$testdir/sinesweep_$((ch))_$((fs)).raw -ch:$ch -fs:$fs $flags
+                -o:$testdir/sinesweep_$((chMask))_$((fs)).raw -chMask:$chMask -fs:$fs $flags
+
+            shell_ret=$?
+            if [ $shell_ret -ne 0 ]; then
+                echo "error: $shell_ret"
+                ((++error_count))
+            fi
+
 
             # two channel files should be identical to higher channel
             # computation (first 2 channels).
             # Do not compare cases where -bE is in flags (due to mono computation)
-            if [[ $flags != *"-bE"* ]] && [ "$ch" -gt 2 ]
+            if [[ $flags != *"-bE"* ]] && [[ "$chMask" -gt 1 ]]
             then
-                adb shell cmp $testdir/sinesweep_2_$((fs)).raw \
-                    $testdir/sinesweep_$((ch))_$((fs)).raw
-            elif [[ $flags == *"-bE"* ]] && [ "$ch" -gt 2 ]
+                adb shell cmp $testdir/sinesweep_1_$((fs)).raw \
+                    $testdir/sinesweep_$((chMask))_$((fs)).raw
+            elif [[ $flags == *"-bE"* ]] && [[ "$chMask" -gt 1 ]]
             then
-                adb shell $testdir/snr $testdir/sinesweep_2_$((fs)).raw \
-                    $testdir/sinesweep_$((ch))_$((fs)).raw -thr:90.308998
+                adb shell $testdir/snr $testdir/sinesweep_1_$((fs)).raw \
+                    $testdir/sinesweep_$((chMask))_$((fs)).raw -thr:90.308998
+            fi
+
+            # both cmp and snr return EXIT_FAILURE on mismatch.
+            shell_ret=$?
+            if [ $shell_ret -ne 0 ]; then
+                echo "error: $shell_ret"
+                ((++error_count))
             fi
 
         done
@@ -83,3 +102,5 @@
 done
 
 adb shell rm -r $testdir
+echo "$error_count errors"
+exit $error_count
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index 416bdaa..5b58dd1 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -24,6 +24,7 @@
 #include <audio_utils/channels.h>
 #include <audio_utils/primitives.h>
 #include <log/log.h>
+#include <system/audio.h>
 
 #include "EffectBundle.h"
 #include "LVM_Private.h"
@@ -76,6 +77,8 @@
 struct lvmConfigParams_t {
   int              samplingFreq    = 44100;
   int              nrChannels      = 2;
+  int              chMask          = AUDIO_CHANNEL_OUT_STEREO;
+  int              vcBal           = 0;
   int              fChannels       = 2;
   bool             monoMode        = false;
   int              bassEffectLevel = 0;
@@ -87,9 +90,36 @@
   LVM_Mode_en      csEnable        = LVM_MODE_OFF;
 };
 
+constexpr audio_channel_mask_t lvmConfigChMask[] = {
+    AUDIO_CHANNEL_OUT_MONO,
+    AUDIO_CHANNEL_OUT_STEREO,
+    AUDIO_CHANNEL_OUT_2POINT1,
+    AUDIO_CHANNEL_OUT_2POINT0POINT2,
+    AUDIO_CHANNEL_OUT_QUAD,
+    AUDIO_CHANNEL_OUT_QUAD_BACK,
+    AUDIO_CHANNEL_OUT_QUAD_SIDE,
+    AUDIO_CHANNEL_OUT_SURROUND,
+    (1 << 4) - 1,
+    AUDIO_CHANNEL_OUT_2POINT1POINT2,
+    AUDIO_CHANNEL_OUT_3POINT0POINT2,
+    AUDIO_CHANNEL_OUT_PENTA,
+    (1 << 5) - 1,
+    AUDIO_CHANNEL_OUT_3POINT1POINT2,
+    AUDIO_CHANNEL_OUT_5POINT1,
+    AUDIO_CHANNEL_OUT_5POINT1_BACK,
+    AUDIO_CHANNEL_OUT_5POINT1_SIDE,
+    (1 << 6) - 1,
+    AUDIO_CHANNEL_OUT_6POINT1,
+    (1 << 7) - 1,
+    AUDIO_CHANNEL_OUT_5POINT1POINT2,
+    AUDIO_CHANNEL_OUT_7POINT1,
+    (1 << 8) - 1,
+};
+
+
 void printUsage() {
   printf("\nUsage: ");
-  printf("\n     <exceutable> -i:<input_file> -o:<out_file> [options]\n");
+  printf("\n     <executable> -i:<input_file> -o:<out_file> [options]\n");
   printf("\nwhere, \n     <inputfile>  is the input file name");
   printf("\n                  on which LVM effects are applied");
   printf("\n     <outputfile> processed output file");
@@ -98,7 +128,34 @@
   printf("\n     -help (or) -h");
   printf("\n           Prints this usage information");
   printf("\n");
-  printf("\n     -ch:<process_channels> (1 through 8)\n\n");
+  printf("\n     -chMask:<channel_mask>\n");
+  printf("\n         0  - AUDIO_CHANNEL_OUT_MONO");
+  printf("\n         1  - AUDIO_CHANNEL_OUT_STEREO");
+  printf("\n         2  - AUDIO_CHANNEL_OUT_2POINT1");
+  printf("\n         3  - AUDIO_CHANNEL_OUT_2POINT0POINT2");
+  printf("\n         4  - AUDIO_CHANNEL_OUT_QUAD");
+  printf("\n         5  - AUDIO_CHANNEL_OUT_QUAD_BACK");
+  printf("\n         6  - AUDIO_CHANNEL_OUT_QUAD_SIDE");
+  printf("\n         7  - AUDIO_CHANNEL_OUT_SURROUND");
+  printf("\n         8  - canonical channel index mask for 4 ch: (1 << 4) - 1");
+  printf("\n         9  - AUDIO_CHANNEL_OUT_2POINT1POINT2");
+  printf("\n         10 - AUDIO_CHANNEL_OUT_3POINT0POINT2");
+  printf("\n         11 - AUDIO_CHANNEL_OUT_PENTA");
+  printf("\n         12 - canonical channel index mask for 5 ch: (1 << 5) - 1");
+  printf("\n         13 - AUDIO_CHANNEL_OUT_3POINT1POINT2");
+  printf("\n         14 - AUDIO_CHANNEL_OUT_5POINT1");
+  printf("\n         15 - AUDIO_CHANNEL_OUT_5POINT1_BACK");
+  printf("\n         16 - AUDIO_CHANNEL_OUT_5POINT1_SIDE");
+  printf("\n         17 - canonical channel index mask for 6 ch: (1 << 6) - 1");
+  printf("\n         18 - AUDIO_CHANNEL_OUT_6POINT1");
+  printf("\n         19 - canonical channel index mask for 7 ch: (1 << 7) - 1");
+  printf("\n         20 - AUDIO_CHANNEL_OUT_5POINT1POINT2");
+  printf("\n         21 - AUDIO_CHANNEL_OUT_7POINT1");
+  printf("\n         22 - canonical channel index mask for 8 ch: (1 << 8) - 1");
+  printf("\n         default 0");
+  printf("\n     -vcBal:<Left Right Balance control in dB [-96 to 96 dB]>");
+  printf("\n            -ve values reduce Right channel while +ve value reduces Left channel");
+  printf("\n                 default 0");
   printf("\n     -fch:<file_channels> (1 through 8)\n\n");
   printf("\n     -M");
   printf("\n           Mono mode (force all input audio channels to be identical)");
@@ -298,6 +355,7 @@
   params->OperatingMode = LVM_MODE_ON;
   params->SampleRate = LVM_FS_44100;
   params->SourceFormat = LVM_STEREO;
+  params->ChMask       = AUDIO_CHANNEL_OUT_STEREO;
   params->SpeakerType = LVM_HEADPHONES;
 
   pContext->pBundledContext->SampleRate = LVM_FS_44100;
@@ -452,13 +510,13 @@
   params->OperatingMode = LVM_MODE_ON;
   params->SpeakerType = LVM_HEADPHONES;
 
-  const int nrChannels = plvmConfigParams->nrChannels;
-  params->NrChannels = nrChannels;
-  if (nrChannels == 1) {
+  params->ChMask     = plvmConfigParams->chMask;
+  params->NrChannels = plvmConfigParams->nrChannels;
+  if (params->NrChannels == 1) {
     params->SourceFormat = LVM_MONO;
-  } else if (nrChannels == 2) {
+  } else if (params->NrChannels == 2) {
     params->SourceFormat = LVM_STEREO;
-  } else if (nrChannels > 2 && nrChannels <= 8) { // FCC_2 FCC_8
+  } else if (params->NrChannels > 2 && params->NrChannels <= 8) { // FCC_2 FCC_8
     params->SourceFormat = LVM_MULTICHANNEL;
   } else {
       return -EINVAL;
@@ -531,7 +589,7 @@
 
   /* Volume Control parameters */
   params->VC_EffectLevel = 0;
-  params->VC_Balance = 0;
+  params->VC_Balance = plvmConfigParams->vcBal;
 
   /* Treble Enhancement parameters */
   params->TE_OperatingMode = plvmConfigParams->trebleEnable;
@@ -667,13 +725,22 @@
         return -1;
       }
       lvmConfigParams.samplingFreq = samplingFreq;
-    } else if (!strncmp(argv[i], "-ch:", 4)) {
-      const int nrChannels = atoi(argv[i] + 4);
-      if (nrChannels > 8 || nrChannels < 1) {
-        printf("Error: Unsupported number of channels : %d\n", nrChannels);
+    } else if (!strncmp(argv[i], "-chMask:", 8)) {
+      const int chMaskConfigIdx = atoi(argv[i] + 8);
+      if (chMaskConfigIdx < 0 || (size_t)chMaskConfigIdx >= std::size(lvmConfigChMask)) {
+        ALOGE("\nError: Unsupported Channel Mask : %d\n", chMaskConfigIdx);
         return -1;
       }
-      lvmConfigParams.nrChannels = nrChannels;
+      const audio_channel_mask_t chMask = lvmConfigChMask[chMaskConfigIdx];
+      lvmConfigParams.chMask = chMask;
+      lvmConfigParams.nrChannels = audio_channel_count_from_out_mask(chMask);
+    } else if (!strncmp(argv[i], "-vcBal:", 7)) {
+      const int vcBalance = atoi(argv[i] + 7);
+      if (vcBalance > 96 || vcBalance < -96) {
+        ALOGE("\nError: Unsupported volume balance value: %d\n", vcBalance);
+        return -1;
+      }
+      lvmConfigParams.vcBal = vcBalance;
     } else if (!strncmp(argv[i], "-fch:", 5)) {
       const int fChannels = atoi(argv[i] + 5);
       if (fChannels > 8 || fChannels < 1) {
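
To make the new -chMask option easier to follow: the index selects an entry of lvmConfigChMask, and the channel count is derived from that mask, which for positional output masks amounts to counting set bits. A reduced standalone sketch of the same mapping follows; the mask values (MONO = 0x1, STEREO = 0x3, 5.1 = 0x3F) are the usual positional encodings and stand in for the system/audio.h constants used by the real code:

    #include <bitset>
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <iterator>

    int main(int argc, char** argv) {
        // Reduced stand-in for lvmConfigChMask[]; values assume the usual
        // positional mask encoding (MONO = 0x1, STEREO = 0x3, 5.1 = 0x3F).
        static const unsigned kChMask[] = { 0x1, 0x3, 0x3F };
        const int idx = (argc > 1) ? std::atoi(argv[1]) : 0;
        if (idx < 0 || (std::size_t)idx >= std::size(kChMask)) {
            std::fprintf(stderr, "Error: Unsupported Channel Mask : %d\n", idx);
            return EXIT_FAILURE;
        }
        const unsigned mask = kChMask[idx];
        // audio_channel_count_from_out_mask() boils down to a popcount here.
        const int nrChannels = (int)std::bitset<32>(mask).count();
        std::printf("index %d -> mask 0x%X -> %d channels\n", idx, mask, nrChannels);
        return EXIT_SUCCESS;
    }
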
diff --git a/media/libeffects/lvm/tests/snr.cpp b/media/libeffects/lvm/tests/snr.cpp
index 88110c0..885994c 100644
--- a/media/libeffects/lvm/tests/snr.cpp
+++ b/media/libeffects/lvm/tests/snr.cpp
@@ -84,6 +84,7 @@
     printf("\nError: missing input/reference files\n");
     return -1;
   }
+  int ret = EXIT_SUCCESS;
   auto sn = pcm_format == 0
       ? getSignalNoise<short>(finp, fref)
       : getSignalNoise<float>(finp, fref);
@@ -92,6 +93,7 @@
     // compare the measured snr value with threshold
     if (snr < thr) {
       printf("%.6f less than threshold %.6f\n", snr, thr);
+      ret = EXIT_FAILURE;
     } else {
       printf("%.6f\n", snr);
     }
@@ -99,5 +101,5 @@
   fclose(finp);
   fclose(fref);
 
-  return 0;
+  return ret;
 }
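
With snr now returning EXIT_FAILURE below the threshold, the shell script above can count failures directly. For orientation, a minimal energy-ratio SNR over two equal-length buffers looks like the sketch below; the actual getSignalNoise<>() helper in snr.cpp may accumulate signal and noise differently:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Energy-ratio SNR in dB between a reference and a test buffer.
    static double snrDb(const std::vector<float>& ref, const std::vector<float>& test) {
        double signal = 0.0, noise = 0.0;
        const std::size_t n = std::min(ref.size(), test.size());
        for (std::size_t i = 0; i < n; ++i) {
            const double e = (double)test[i] - (double)ref[i];
            signal += (double)ref[i] * (double)ref[i];
            noise += e * e;
        }
        if (noise == 0.0) return INFINITY;  // bit-exact output
        return 10.0 * std::log10(signal / noise);
    }

    int main() {
        const std::vector<float> ref  = {0.5f, -0.25f, 0.125f};
        const std::vector<float> test = {0.5f, -0.25f, 0.125f};
        std::printf("SNR: %f dB\n", snrDb(ref, test));  // inf for identical buffers
        return 0;
    }
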
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 0c6f8de..3a97905 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1315,6 +1315,7 @@
 
 #ifdef SUPPORT_MC
         ActiveParams.NrChannels = NrChannels;
+        ActiveParams.ChMask = pConfig->inputCfg.channels;
 #endif
 
         LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index f283569..a354ce1 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -66,8 +66,8 @@
     ENABLE_AUDIO_DEVICE_CALLBACK,
     GET_ACTIVE_MICROPHONES,
     GET_PORT_ID,
-    SET_MICROPHONE_DIRECTION,
-    SET_MICROPHONE_FIELD_DIMENSION
+    SET_PREFERRED_MICROPHONE_DIRECTION,
+    SET_PREFERRED_MICROPHONE_FIELD_DIMENSION
 };
 
 class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -409,21 +409,21 @@
         return status;
     }
 
-    status_t setMicrophoneDirection(audio_microphone_direction_t direction) {
-        ALOGV("setMicrophoneDirection(%d)", direction);
+    status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+        ALOGV("setPreferredMicrophoneDirection(%d)", direction);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
         data.writeInt32(direction);
-        status_t status = remote()->transact(SET_MICROPHONE_DIRECTION, data, &reply);
+        status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_DIRECTION, data, &reply);
         return status == NO_ERROR ? (status_t)reply.readInt32() : status;
     }
 
-    status_t setMicrophoneFieldDimension(float zoom) {
-        ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+    status_t setPreferredMicrophoneFieldDimension(float zoom) {
+        ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
         data.writeFloat(zoom);
-        status_t status = remote()->transact(SET_MICROPHONE_FIELD_DIMENSION, data, &reply);
+        status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_FIELD_DIMENSION, data, &reply);
         return status == NO_ERROR ? (status_t)reply.readInt32() : status;
     }
 
@@ -709,20 +709,20 @@
             }
             return NO_ERROR;
         }
-        case SET_MICROPHONE_DIRECTION: {
-            ALOGV("SET_MICROPHONE_DIRECTION");
+        case SET_PREFERRED_MICROPHONE_DIRECTION: {
+            ALOGV("SET_PREFERRED_MICROPHONE_DIRECTION");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
             int direction = data.readInt32();
-            status_t status =
-                setMicrophoneDirection(static_cast<audio_microphone_direction_t>(direction));
+            status_t status = setPreferredMicrophoneDirection(
+                    static_cast<audio_microphone_direction_t>(direction));
             reply->writeInt32(status);
             return NO_ERROR;
         }
-        case SET_MICROPHONE_FIELD_DIMENSION: {
+        case SET_PREFERRED_MICROPHONE_FIELD_DIMENSION: {
             ALOGV("SET_MICROPHONE_FIELD_DIMENSION");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
             float zoom = data.readFloat();
-            status_t status = setMicrophoneFieldDimension(zoom);
+            status_t status = setPreferredMicrophoneFieldDimension(zoom);
             reply->writeInt32(status);
             return NO_ERROR;
         }
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index ea0547c..c150407 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -65,6 +65,7 @@
     AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
     AMEDIAFORMAT_KEY_GRID_COLUMNS,
     AMEDIAFORMAT_KEY_GRID_ROWS,
+    AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT,
     AMEDIAFORMAT_KEY_HEIGHT,
     AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD,
     AMEDIAFORMAT_KEY_IS_ADTS,
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 0b09420..f9c557c 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -73,8 +73,8 @@
     virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
     virtual status_t getActiveMicrophones(
                         std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
-    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
-    virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+    virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+    virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
     virtual status_t getPortId(audio_port_handle_t *portId) = 0;
 };
 
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 88282ac..a2dff31 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -72,8 +72,8 @@
     virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
     virtual status_t getActiveMicrophones(
                         std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
-    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
-    virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+    virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+    virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
     virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
 
 
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 8580437..2dd4b7f 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -264,8 +264,8 @@
     status_t    getRoutedDeviceId(audio_port_handle_t *deviceId);
     status_t    enableAudioDeviceCallback(bool enabled);
     status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
-    status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
-    status_t    setMicrophoneFieldDimension(float zoom);
+    status_t    setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+    status_t    setPreferredMicrophoneFieldDimension(float zoom);
 
     status_t    getPortId(audio_port_handle_t *portId) const;
 
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 6c59a29..4570af9 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -842,14 +842,14 @@
     return mMediaRecorder->getActiveMicrophones(activeMicrophones);
 }
 
-status_t MediaRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
-    ALOGV("setMicrophoneDirection(%d)", direction);
-    return mMediaRecorder->setMicrophoneDirection(direction);
+status_t MediaRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+    ALOGV("setPreferredMicrophoneDirection(%d)", direction);
+    return mMediaRecorder->setPreferredMicrophoneDirection(direction);
 }
 
-status_t MediaRecorder::setMicrophoneFieldDimension(float zoom) {
-    ALOGV("setMicrophoneFieldDimension(%f)", zoom);
-    return mMediaRecorder->setMicrophoneFieldDimension(zoom);
+status_t MediaRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
+    ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
+    return mMediaRecorder->setPreferredMicrophoneFieldDimension(zoom);
 }
 
 status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const
diff --git a/media/libmedia/xsd/api/current.txt b/media/libmedia/xsd/api/current.txt
index 0924dd9..05e8a49 100644
--- a/media/libmedia/xsd/api/current.txt
+++ b/media/libmedia/xsd/api/current.txt
@@ -45,10 +45,17 @@
     ctor public CamcorderProfiles();
     method public int getCameraId();
     method public java.util.List<media.profiles.EncoderProfile> getEncoderProfile();
+    method public java.util.List<media.profiles.CamcorderProfiles.ImageDecoding> getImageDecoding();
     method public java.util.List<media.profiles.CamcorderProfiles.ImageEncoding> getImageEncoding();
     method public void setCameraId(int);
   }
 
+  public static class CamcorderProfiles.ImageDecoding {
+    ctor public CamcorderProfiles.ImageDecoding();
+    method public int getMemCap();
+    method public void setMemCap(int);
+  }
+
   public static class CamcorderProfiles.ImageEncoding {
     ctor public CamcorderProfiles.ImageEncoding();
     method public int getQuality();
diff --git a/media/libmedia/xsd/media_profiles.xsd b/media/libmedia/xsd/media_profiles.xsd
index a9687b0..a02252a 100644
--- a/media/libmedia/xsd/media_profiles.xsd
+++ b/media/libmedia/xsd/media_profiles.xsd
@@ -42,6 +42,11 @@
                     <xs:attribute name="quality" type="xs:int"/>
                 </xs:complexType>
             </xs:element>
+            <xs:element name="ImageDecoding" minOccurs="0" maxOccurs="unbounded">
+                <xs:complexType>
+                    <xs:attribute name="memCap" type="xs:int"/>
+                </xs:complexType>
+            </xs:element>
         </xs:sequence>
         <xs:attribute name="cameraId" type="xs:int"/>
     </xs:complexType>
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index d6628d9..9f4265b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -538,18 +538,19 @@
     return NO_INIT;
 }
 
-status_t MediaRecorderClient::setMicrophoneDirection(audio_microphone_direction_t direction) {
-    ALOGV("setMicrophoneDirection(%d)", direction);
+status_t MediaRecorderClient::setPreferredMicrophoneDirection(
+            audio_microphone_direction_t direction) {
+    ALOGV("setPreferredMicrophoneDirection(%d)", direction);
     if (mRecorder != NULL) {
-        return mRecorder->setMicrophoneDirection(direction);
+        return mRecorder->setPreferredMicrophoneDirection(direction);
     }
     return NO_INIT;
 }
 
-status_t MediaRecorderClient::setMicrophoneFieldDimension(float zoom) {
-    ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t MediaRecorderClient::setPreferredMicrophoneFieldDimension(float zoom) {
+    ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
     if (mRecorder != NULL) {
-        return mRecorder->setMicrophoneFieldDimension(zoom);
+        return mRecorder->setPreferredMicrophoneFieldDimension(zoom);
     }
     return NO_INIT;
 }
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 8da718f..e698819 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -109,8 +109,8 @@
     virtual     status_t   enableAudioDeviceCallback(bool enabled);
     virtual     status_t   getActiveMicrophones(
                               std::vector<media::MicrophoneInfo>* activeMicrophones);
-    virtual     status_t   setMicrophoneDirection(audio_microphone_direction_t direction);
-    virtual     status_t   setMicrophoneFieldDimension(float zoom);
+    virtual     status_t   setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+    virtual     status_t   setPreferredMicrophoneFieldDimension(float zoom);
                 status_t   getPortId(audio_port_handle_t *portId) override;
 
 private:
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 77777b8..63681fa 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -164,9 +164,12 @@
     mAnalyticsItem->setInt32(kRecorderVideoIframeInterval, mIFramesIntervalSec);
     // TBD mAudioSourceNode = 0;
     // TBD mUse64BitFileOffset = false;
-    mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
-    mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
-    mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
+    if (mMovieTimeScale != -1)
+        mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
+    if (mAudioTimeScale != -1)
+        mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
+    if (mVideoTimeScale != -1)
+        mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
     // TBD mCameraId        = 0;
     // TBD mStartTimeOffsetMs = -1;
     mAnalyticsItem->setInt32(kRecorderVideoProfile, mVideoEncoderProfile);
@@ -2210,7 +2213,7 @@
 }
 
 status_t StagefrightRecorder::getMetrics(Parcel *reply) {
-    ALOGD("StagefrightRecorder::getMetrics");
+    ALOGV("StagefrightRecorder::getMetrics");
 
     if (reply == NULL) {
         ALOGE("Null pointer argument");
@@ -2274,20 +2277,20 @@
     return NO_INIT;
 }
 
-status_t StagefrightRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
-    ALOGV("setMicrophoneDirection(%d)", direction);
+status_t StagefrightRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+    ALOGV("setPreferredMicrophoneDirection(%d)", direction);
     mSelectedMicDirection = direction;
     if (mAudioSourceNode != 0) {
-        return mAudioSourceNode->setMicrophoneDirection(direction);
+        return mAudioSourceNode->setPreferredMicrophoneDirection(direction);
     }
     return NO_INIT;
 }
 
-status_t StagefrightRecorder::setMicrophoneFieldDimension(float zoom) {
-    ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t StagefrightRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
+    ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
     mSelectedMicFieldDimension = zoom;
     if (mAudioSourceNode != 0) {
-        return mAudioSourceNode->setMicrophoneFieldDimension(zoom);
+        return mAudioSourceNode->setPreferredMicrophoneFieldDimension(zoom);
     }
     return NO_INIT;
 }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 236b19e..8bf083a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -77,8 +77,8 @@
     virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
     virtual status_t enableAudioDeviceCallback(bool enabled);
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
-    virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
-    virtual status_t setMicrophoneFieldDimension(float zoom);
+    virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+    virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
             status_t getPortId(audio_port_handle_t *portId) const override;
 
 private:
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 5f86bd3..5194e03 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -510,18 +510,18 @@
     return NO_INIT;
 }
 
-status_t AudioSource::setMicrophoneDirection(audio_microphone_direction_t direction) {
-    ALOGV("setMicrophoneDirection(%d)", direction);
+status_t AudioSource::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+    ALOGV("setPreferredMicrophoneDirection(%d)", direction);
     if (mRecord != 0) {
-        return mRecord->setMicrophoneDirection(direction);
+        return mRecord->setPreferredMicrophoneDirection(direction);
     }
     return NO_INIT;
 }
 
-status_t AudioSource::setMicrophoneFieldDimension(float zoom) {
-    ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t AudioSource::setPreferredMicrophoneFieldDimension(float zoom) {
+    ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
     if (mRecord != 0) {
-        return mRecord->setMicrophoneFieldDimension(zoom);
+        return mRecord->setPreferredMicrophoneFieldDimension(zoom);
     }
     return NO_INIT;
 }
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index dbc287e..3f0bc7d 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -309,7 +309,6 @@
 void parseVorbisComment(
         AMediaFormat *fileMeta, const char *comment, size_t commentLength) {
     // Haptic tag is only kept here as it will only be used in extractor to generate channel mask.
-    const char* const haptic = "haptic";
     struct {
         const char *const mTag;
         const char *mKey;
@@ -330,7 +329,7 @@
         { "LYRICIST", AMEDIAFORMAT_KEY_LYRICIST },
         { "METADATA_BLOCK_PICTURE", AMEDIAFORMAT_KEY_ALBUMART },
         { "ANDROID_LOOP", AMEDIAFORMAT_KEY_LOOP },
-        { "ANDROID_HAPTIC", haptic },
+        { "ANDROID_HAPTIC", AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT },
     };
 
         for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
@@ -346,12 +345,12 @@
                     if (!strcasecmp(&comment[tagLen + 1], "true")) {
                         AMediaFormat_setInt32(fileMeta, AMEDIAFORMAT_KEY_LOOP, 1);
                     }
-                } else if (kMap[j].mKey == haptic) {
+                } else if (kMap[j].mKey == AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT) {
                     char *end;
                     errno = 0;
                     const int hapticChannelCount = strtol(&comment[tagLen + 1], &end, 10);
                     if (errno == 0) {
-                        AMediaFormat_setInt32(fileMeta, haptic, hapticChannelCount);
+                        AMediaFormat_setInt32(fileMeta, kMap[j].mKey, hapticChannelCount);
                     } else {
                         ALOGE("Error(%d) when parsing haptic channel count", errno);
                     }
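
The MetaDataUtils change maps the ANDROID_HAPTIC vorbis comment straight onto AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, relying on the strtol/errno idiom to reject malformed values. A standalone sketch of that idiom, with a made-up tag string:

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
        // Hypothetical comment string; real input comes from the vorbis metadata.
        const char* comment = "ANDROID_HAPTIC=2";
        const char* tag = "ANDROID_HAPTIC";
        const size_t tagLen = std::strlen(tag);
        if (std::strncmp(comment, tag, tagLen) == 0 && comment[tagLen] == '=') {
            char* end = nullptr;
            errno = 0;
            const long hapticChannelCount = std::strtol(comment + tagLen + 1, &end, 10);
            if (errno == 0 && end != comment + tagLen + 1) {
                std::printf("haptic-channel-count = %ld\n", hapticChannelCount);
            } else {
                std::fprintf(stderr, "Error(%d) when parsing haptic channel count\n", errno);
            }
        }
        return 0;
    }
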
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
index b0ce688..29c3a35 100644
--- a/media/libstagefright/RemoteMediaExtractor.cpp
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "RemoteMediaExtractor"
 #include <utils/Log.h>
 
+#include <binder/IPCThreadState.h>
 #include <media/stagefright/InterfaceUtils.h>
 #include <media/MediaAnalyticsItem.h>
 #include <media/MediaSource.h>
@@ -51,6 +52,11 @@
     if (MEDIA_LOG) {
         mAnalyticsItem = MediaAnalyticsItem::create(kKeyExtractor);
 
+        // we're in the extractor service, we want to attribute to the app
+        // that invoked us.
+        int uid = IPCThreadState::self()->getCallingUid();
+        mAnalyticsItem->setUid(uid);
+
         // track the container format (mpeg, aac, wvm, etc)
         size_t ntracks = extractor->countTracks();
         mAnalyticsItem->setCString(kExtractorFormat, extractor->name());
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2aa9ed8..c7b2719 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -967,6 +967,11 @@
         if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
             msg->setInt32("pcm-encoding", pcmEncoding);
         }
+
+        int32_t hapticChannelCount;
+        if (meta->findInt32(kKeyHapticChannelCount, &hapticChannelCount)) {
+            msg->setInt32("haptic-channel-count", hapticChannelCount);
+        }
     }
 
     int32_t maxInputSize;
@@ -1708,6 +1713,11 @@
         if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
             meta->setInt32(kKeyPcmEncoding, pcmEncoding);
         }
+
+        int32_t hapticChannelCount;
+        if (msg->findInt32("haptic-channel-count", &hapticChannelCount)) {
+            meta->setInt32(kKeyHapticChannelCount, hapticChannelCount);
+        }
     }
 
     int32_t maxInputSize;
diff --git a/media/libstagefright/codecs/amrnb/common/src/lsp.cpp b/media/libstagefright/codecs/amrnb/common/src/lsp.cpp
index 0e3f772..81d9cde 100644
--- a/media/libstagefright/codecs/amrnb/common/src/lsp.cpp
+++ b/media/libstagefright/codecs/amrnb/common/src/lsp.cpp
@@ -173,7 +173,7 @@
     *st = NULL;
 
     /* allocate memory */
-    if ((s = (lspState *) malloc(sizeof(lspState))) == NULL)
+    if ((s = (lspState *) calloc(sizeof(lspState), 1)) == NULL)
     {
         /* fprintf(stderr, "lsp_init: can not malloc state structure\n"); */
         return -1;
@@ -182,11 +182,13 @@
     /* Initialize quantization state */
     if (0 != Q_plsf_init(&s->qSt))
     {
+        lsp_exit(&s);
         return -1;
     }
 
     if (0 != lsp_reset(s))
     {
+        lsp_exit(&s);
         return -1;
     }
 
diff --git a/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp b/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp
index 2989b74..49cafff 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp
@@ -268,13 +268,7 @@
     if (Decoder_amr_init(&s->decoder_amrState)
             || Post_Process_reset(&s->postHP_state))
     {
-        Speech_Decode_FrameState *tmp = s;
-        /*
-         *  dereferencing type-punned pointer avoid
-         *  breaking strict-aliasing rules
-         */
-        void** tempVoid = (void**) tmp;
-        GSMDecodeFrameExit(tempVoid);
+        free(s);
         return (-1);
     }
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index d153598..c62c2cd 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -2125,7 +2125,10 @@
     size_t offset = 0;
     while (offset < buffer->size()) {
         const uint8_t *adtsHeader = buffer->data() + offset;
-        CHECK_LT(offset + 5, buffer->size());
+        if (buffer->size() <= offset+5) {
+            ALOGV("buffer does not contain a complete header");
+            return ERROR_MALFORMED;
+        }
         // non-const pointer for decryption if needed
         uint8_t *adtsFrame = buffer->data() + offset;
 
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 18e5f10..af04dad 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -70,8 +70,8 @@
     status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
 
     status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
-    status_t setMicrophoneDirection(audio_microphone_direction_t direction);
-    status_t setMicrophoneFieldDimension(float zoom);
+    status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+    status_t setPreferredMicrophoneFieldDimension(float zoom);
 
     status_t getPortId(audio_port_handle_t *portId) const;
 
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 75fd0d9..8dc2dd5 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -62,8 +62,6 @@
     kKeyAV1C              = 'av1c',  // raw data
     kKeyThumbnailHVCC     = 'thvc',  // raw data
     kKeyD263              = 'd263',  // raw data
-    kKeyVorbisInfo        = 'vinf',  // raw data
-    kKeyVorbisBooks       = 'vboo',  // raw data
     kKeyOpusHeader        = 'ohdr',  // raw data
     kKeyOpusCodecDelay    = 'ocod',  // uint64_t (codec delay in ns)
     kKeyOpusSeekPreRoll   = 'ospr',  // uint64_t (seek preroll in ns)
@@ -238,6 +236,8 @@
     kKeyOpaqueCSD0       = 'csd0',
     kKeyOpaqueCSD1       = 'csd1',
     kKeyOpaqueCSD2       = 'csd2',
+
+    kKeyHapticChannelCount = 'hapC',
 };
 
 enum {
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
index e32f676..7d446ab 100644
--- a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
@@ -149,6 +149,11 @@
     }
 
     // ADTS header is included in the size
+    if (size < adtsHdrSize) {
+        ALOGV("processAAC: size (%zu) < adtsHdrSize (%zu)", size, adtsHdrSize);
+        android_errorWriteLog(0x534e4554, "128433933");
+        return;
+    }
     size_t offset = adtsHdrSize;
     size_t remainingBytes = size - adtsHdrSize;
 
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index b0a303e..26e0884 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -177,8 +177,8 @@
         const void *headerData3;
         size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
 
-        if (!md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
-            || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+        if (!md->findData(kKeyOpaqueCSD0, &type, &headerData1, &headerSize1)
+            || !md->findData(kKeyOpaqueCSD1, &type, &headerData3, &headerSize3)) {
             ALOGE("Missing header format keys for vorbis track");
             md->dumpToLog();
             return NULL;
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index f5245c1..5443f2c 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -1,6 +1,12 @@
 // Signature format: 2.0
 package media.codecs {
 
+  public class Alias {
+    ctor public Alias();
+    method public String getName();
+    method public void setName(String);
+  }
+
   public class Decoders {
     ctor public Decoders();
     method public java.util.List<media.codecs.MediaCodec> getMediaCodec();
@@ -23,6 +29,23 @@
     method public void setValue(String);
   }
 
+  public class Include {
+    ctor public Include();
+    method public String getHref();
+    method public void setHref(String);
+  }
+
+  public class Included {
+    ctor public Included();
+    method public media.codecs.Decoders getDecoders_optional();
+    method public media.codecs.Encoders getEncoders_optional();
+    method public java.util.List<media.codecs.Include> getInclude_optional();
+    method public media.codecs.Settings getSettings_optional();
+    method public void setDecoders_optional(media.codecs.Decoders);
+    method public void setEncoders_optional(media.codecs.Encoders);
+    method public void setSettings_optional(media.codecs.Settings);
+  }
+
   public class Limit {
     ctor public Limit();
     method public String getIn();
@@ -47,12 +70,13 @@
 
   public class MediaCodec {
     ctor public MediaCodec();
-    method public java.util.List<media.codecs.Feature> getFeature();
-    method public java.util.List<media.codecs.Limit> getLimit();
+    method public java.util.List<media.codecs.Alias> getAlias_optional();
+    method public java.util.List<media.codecs.Feature> getFeature_optional();
+    method public java.util.List<media.codecs.Limit> getLimit_optional();
     method public String getName();
-    method public java.util.List<media.codecs.Quirk> getQuirk();
-    method public java.util.List<media.codecs.Type> getType();
+    method public java.util.List<media.codecs.Quirk> getQuirk_optional();
     method public String getType();
+    method public java.util.List<media.codecs.Type> getType_optional();
     method public String getUpdate();
     method public void setName(String);
     method public void setType(String);
@@ -61,9 +85,13 @@
 
   public class MediaCodecs {
     ctor public MediaCodecs();
-    method public java.util.List<media.codecs.Decoders> getDecoders();
-    method public java.util.List<media.codecs.Encoders> getEncoders();
-    method public java.util.List<media.codecs.Settings> getSettings();
+    method public media.codecs.Decoders getDecoders_optional();
+    method public media.codecs.Encoders getEncoders_optional();
+    method public java.util.List<media.codecs.Include> getInclude_optional();
+    method public media.codecs.Settings getSettings_optional();
+    method public void setDecoders_optional(media.codecs.Decoders);
+    method public void setEncoders_optional(media.codecs.Encoders);
+    method public void setSettings_optional(media.codecs.Settings);
   }
 
   public class Quirk {
@@ -89,6 +117,7 @@
 
   public class Type {
     ctor public Type();
+    method public java.util.List<media.codecs.Alias> getAlias();
     method public java.util.List<media.codecs.Feature> getFeature();
     method public java.util.List<media.codecs.Limit> getLimit();
     method public String getName();
@@ -99,7 +128,8 @@
 
   public class XmlParser {
     ctor public XmlParser();
-    method public static media.codecs.MediaCodecs read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+    method public static media.codecs.Included readIncluded(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+    method public static media.codecs.MediaCodecs readMediaCodecs(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
     method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
     method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
   }
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 4faba87..77193a2 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -20,11 +20,22 @@
            xmlns:xs="http://www.w3.org/2001/XMLSchema">
     <xs:element name="MediaCodecs">
         <xs:complexType>
-            <xs:sequence>
-                <xs:element name="Decoders" type="Decoders" maxOccurs="unbounded"/>
-                <xs:element name="Encoders" type="Encoders" maxOccurs="unbounded"/>
-                <xs:element name="Settings" type="Settings" maxOccurs="unbounded"/>
-            </xs:sequence>
+            <xs:choice minOccurs="0" maxOccurs="unbounded">
+                <xs:element name="Include" type="Include" maxOccurs="unbounded"/>
+                <xs:element name="Settings" type="Settings"/>
+                <xs:element name="Decoders" type="Decoders"/>
+                <xs:element name="Encoders" type="Encoders"/>
+            </xs:choice>
+        </xs:complexType>
+    </xs:element>
+    <xs:element name="Included">
+        <xs:complexType>
+            <xs:choice minOccurs="0" maxOccurs="unbounded">
+                <xs:element name="Include" type="Include" maxOccurs="unbounded"/>
+                <xs:element name="Settings" type="Settings"/>
+                <xs:element name="Decoders" type="Decoders"/>
+                <xs:element name="Encoders" type="Encoders"/>
+            </xs:choice>
         </xs:complexType>
     </xs:element>
     <xs:complexType name="Decoders">
@@ -43,12 +54,13 @@
         </xs:sequence>
     </xs:complexType>
     <xs:complexType name="MediaCodec">
-        <xs:sequence>
-            <xs:element name="Quirk" type="Quirk" maxOccurs="unbounded"/>
-            <xs:element name="Type" type="Type" maxOccurs="unbounded"/>
-            <xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
-            <xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
-        </xs:sequence>
+        <xs:choice minOccurs="0" maxOccurs="unbounded">
+            <xs:element name="Quirk" type="Quirk" minOccurs="0" maxOccurs="unbounded"/>
+            <xs:element name="Type" type="Type" minOccurs="0" maxOccurs="unbounded"/>
+            <xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
+            <xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
+            <xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
+        </xs:choice>
         <xs:attribute name="name" type="xs:string"/>
         <xs:attribute name="type" type="xs:string"/>
         <xs:attribute name="update" type="xs:string"/>
@@ -58,12 +70,16 @@
     </xs:complexType>
     <xs:complexType name="Type">
         <xs:sequence>
-            <xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
-            <xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
+            <xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
+            <xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
+            <xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
         </xs:sequence>
         <xs:attribute name="name" type="xs:string"/>
         <xs:attribute name="update" type="xs:string"/>
     </xs:complexType>
+    <xs:complexType name="Alias">
+        <xs:attribute name="name" type="xs:string"/>
+    </xs:complexType>
     <xs:complexType name="Limit">
         <xs:attribute name="name" type="xs:string"/>
         <xs:attribute name="default" type="xs:string"/>
@@ -86,4 +102,7 @@
         <xs:attribute name="value" type="xs:string"/>
         <xs:attribute name="update" type="xs:string"/>
     </xs:complexType>
+    <xs:complexType name="Include">
+        <xs:attribute name="href" type="xs:string"/>
+    </xs:complexType>
 </xs:schema>
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ed88cf3..51138c8 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -324,6 +324,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_GENRE = "genre";
 EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
 EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT = "haptic-channel-count";
 EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
 EXPORT const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO = "hdr10-plus-info";
 EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 259481d..fd43f36 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -209,6 +209,7 @@
 extern const char* AMEDIAFORMAT_KEY_EXIF_SIZE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_FRAME_COUNT __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_GENRE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_ICC_PROFILE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 4725e9e..f666ad0 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -89,6 +89,7 @@
     AMEDIAFORMAT_KEY_GENRE; # var introduced=29
     AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
     AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+    AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT; # var introduced=29
     AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
     AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
     AMEDIAFORMAT_KEY_ICC_PROFILE; # var introduced=29
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index ec5dfb1..8ac3366 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -615,9 +615,9 @@
         virtual binder::Status   stop();
         virtual binder::Status   getActiveMicrophones(
                 std::vector<media::MicrophoneInfo>* activeMicrophones);
-        virtual binder::Status   setMicrophoneDirection(
+        virtual binder::Status   setPreferredMicrophoneDirection(
                 int /*audio_microphone_direction_t*/ direction);
-        virtual binder::Status   setMicrophoneFieldDimension(float zoom);
+        virtual binder::Status   setPreferredMicrophoneFieldDimension(float zoom);
 
     private:
         const sp<RecordThread::RecordTrack> mRecordTrack;
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index ab4af33..ec1f86c 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -71,8 +71,8 @@
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
-            status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
-            status_t    setMicrophoneFieldDimension(float zoom);
+            status_t    setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+            status_t    setPreferredMicrophoneFieldDimension(float zoom);
 
     static  bool        checkServerLatencySupported(
                                 audio_format_t format, audio_input_flags_t flags) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3ecb37d..e94fb49 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7718,18 +7718,19 @@
     return status;
 }
 
-status_t AudioFlinger::RecordThread::setMicrophoneDirection(audio_microphone_direction_t direction)
+status_t AudioFlinger::RecordThread::setPreferredMicrophoneDirection(
+            audio_microphone_direction_t direction)
 {
-    ALOGV("setMicrophoneDirection(%d)", direction);
+    ALOGV("setPreferredMicrophoneDirection(%d)", direction);
     AutoMutex _l(mLock);
-    return mInput->stream->setMicrophoneDirection(direction);
+    return mInput->stream->setPreferredMicrophoneDirection(direction);
 }
 
-status_t AudioFlinger::RecordThread::setMicrophoneFieldDimension(float zoom)
+status_t AudioFlinger::RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
 {
-    ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+    ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
     AutoMutex _l(mLock);
-    return mInput->stream->setMicrophoneFieldDimension(zoom);
+    return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
 }
 
 void AudioFlinger::RecordThread::updateMetadata_l()
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 47e580b..e5abce7 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1607,8 +1607,8 @@
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
-            status_t    setMicrophoneDirection(audio_microphone_direction_t direction);
-            status_t    setMicrophoneFieldDimension(float zoom);
+            status_t    setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+            status_t    setPreferredMicrophoneFieldDimension(float zoom);
 
             void        updateMetadata_l() override;
 
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5a43696..fbf8fef 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1838,16 +1838,16 @@
             mRecordTrack->getActiveMicrophones(activeMicrophones));
 }
 
-binder::Status AudioFlinger::RecordHandle::setMicrophoneDirection(
+binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
         int /*audio_microphone_direction_t*/ direction) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(mRecordTrack->setMicrophoneDirection(
+    return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
             static_cast<audio_microphone_direction_t>(direction)));
 }
 
-binder::Status AudioFlinger::RecordHandle::setMicrophoneFieldDimension(float zoom) {
+binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(mRecordTrack->setMicrophoneFieldDimension(zoom));
+    return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
 }
 
 // ----------------------------------------------------------------------------
@@ -2144,22 +2144,22 @@
     }
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneDirection(
+status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
         audio_microphone_direction_t direction) {
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        return recordThread->setMicrophoneDirection(direction);
+        return recordThread->setPreferredMicrophoneDirection(direction);
     } else {
         return BAD_VALUE;
     }
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneFieldDimension(float zoom) {
+status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        return recordThread->setMicrophoneFieldDimension(zoom);
+        return recordThread->setPreferredMicrophoneFieldDimension(zoom);
     } else {
         return BAD_VALUE;
     }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index bcb6d77..762a4b1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -4828,6 +4828,7 @@
         ALOGW("closeOutput() unknown output %d", output);
         return;
     }
+    const bool closingOutputWasActive = closingOutput->isActive();
     mPolicyMixes.closeOutput(closingOutput);
 
     // look for duplicated outputs connected to the output being removed.
@@ -4867,6 +4868,9 @@
         mpClientInterface->onAudioPatchListUpdate();
     }
 
+    if (closingOutputWasActive) {
+        closingOutput->stop();
+    }
     closingOutput->close();
 
     removeOutput(output);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index a672521..ea6ca39 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -376,15 +376,17 @@
         return PERMISSION_DENIED;
     }
 
+    bool canCaptureOutput = captureAudioOutputAllowed(pid, uid);
     if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
         attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
         attr->source == AUDIO_SOURCE_VOICE_CALL ||
         attr->source == AUDIO_SOURCE_ECHO_REFERENCE) &&
-        !captureAudioOutputAllowed(pid, uid)) {
+        !canCaptureOutput) {
         return PERMISSION_DENIED;
     }
 
-    if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed(pid, uid)) {
+    bool canCaptureHotword = captureHotwordAllowed(pid, uid);
+    if ((attr->source == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
         return BAD_VALUE;
     }
 
@@ -415,7 +417,7 @@
             case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
                 // FIXME: use the same permission as for remote submix for now.
             case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
-                if (!captureAudioOutputAllowed(pid, uid)) {
+                if (!canCaptureOutput) {
                     ALOGE("getInputForAttr() permission denied: capture not allowed");
                     status = PERMISSION_DENIED;
                 }
@@ -442,7 +444,8 @@
         }
 
         sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
-                                                             *selectedDeviceId, opPackageName);
+                                                             *selectedDeviceId, opPackageName,
+                                                             canCaptureOutput, canCaptureHotword);
         mAudioRecordClients.add(*portId, client);
     }
 
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 8cbf3af..e858e8d 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -414,32 +414,35 @@
 {
 //    Go over all active clients and allow capture (does not force silence) in the
 //    following cases:
-//    The client is the assistant
+//    Another client in the same UID has already been allowed to capture
+//    OR The client is the assistant
 //        AND an accessibility service is on TOP
 //               AND the source is VOICE_RECOGNITION or HOTWORD
 //        OR uses VOICE_RECOGNITION AND is on TOP OR latest started
 //               OR uses HOTWORD
-//            AND there is no privacy sensitive active capture
+//            AND there is no active privacy sensitive capture or call
+//                OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 //    OR The client is an accessibility service
 //        AND is on TOP OR latest started
 //        AND the source is VOICE_RECOGNITION or HOTWORD
-//    OR the source is one of: AUDIO_SOURCE_VOICE_DOWNLINK, AUDIO_SOURCE_VOICE_UPLINK,
-//       AUDIO_SOURCE_VOICE_CALL
+//    OR the client source is virtual (remote submix, call audio TX or RX...)
 //    OR Any other client
 //        AND The assistant is not on TOP
-//        AND is on TOP OR latest started
-//        AND there is no privacy sensitive active capture
+//        AND there is no active privacy sensitive capture or call
+//                OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 //TODO: manage pre-processing effects according to use case priority
 
     sp<AudioRecordClient> topActive;
     sp<AudioRecordClient> latestActive;
     sp<AudioRecordClient> latestSensitiveActive;
+
     nsecs_t topStartNs = 0;
     nsecs_t latestStartNs = 0;
     nsecs_t latestSensitiveStartNs = 0;
     bool isA11yOnTop = mUidPolicy->isA11yOnTop();
     bool isAssistantOnTop = false;
     bool isSensitiveActive = false;
+    bool isInCall = mPhoneState == AUDIO_MODE_IN_CALL;
 
     // if Sensor Privacy is enabled then all recordings should be silenced.
     if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
@@ -449,15 +452,18 @@
 
     for (size_t i =0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
-        if (!current->active) continue;
-        if (isPrivacySensitiveSource(current->attributes.source)) {
-            if (current->startTimeNs > latestSensitiveStartNs) {
-                latestSensitiveActive = current;
-                latestSensitiveStartNs = current->startTimeNs;
-            }
-            isSensitiveActive = true;
+        if (!current->active) {
+            continue;
         }
-        if (mUidPolicy->getUidState(current->uid) == ActivityManager::PROCESS_STATE_TOP) {
+
+        app_state_t appState = apmStatFromAmState(mUidPolicy->getUidState(current->uid));
+        // clients which app is in IDLE state are not eligible for top active or
+        // latest active
+        if (appState == APP_STATE_IDLE) {
+            continue;
+        }
+
+        if (appState == APP_STATE_TOP) {
             if (current->startTimeNs > topStartNs) {
                 topActive = current;
                 topStartNs = current->startTimeNs;
@@ -470,72 +476,105 @@
             latestActive = current;
             latestStartNs = current->startTimeNs;
         }
+        if (isPrivacySensitiveSource(current->attributes.source)) {
+            if (current->startTimeNs > latestSensitiveStartNs) {
+                latestSensitiveActive = current;
+                latestSensitiveStartNs = current->startTimeNs;
+            }
+            isSensitiveActive = true;
+        }
     }
 
-    if (topActive == nullptr && latestActive == nullptr) {
-        return;
+    // if no active client with UI on Top, consider latest active as top
+    if (topActive == nullptr) {
+        topActive = latestActive;
     }
 
-    if (topActive != nullptr) {
-        latestActive = nullptr;
-    }
+    std::vector<uid_t> enabledUids;
 
     for (size_t i =0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
-        if (!current->active) continue;
+        if (!current->active) {
+            continue;
+        }
+
+        // keep capture allowed if another client with the same UID has already
+        // been allowed to capture
+        if (std::find(enabledUids.begin(), enabledUids.end(), current->uid)
+                != enabledUids.end()) {
+            continue;
+        }
 
         audio_source_t source = current->attributes.source;
-        bool isOnTop = current == topActive;
-        bool isLatest = current == latestActive;
-        bool isLatestSensitive = current == latestSensitiveActive;
-        bool forceIdle = true;
+        bool isTopOrLatestActive = topActive == nullptr ? false : current->uid == topActive->uid;
+        bool isLatestSensitive = latestSensitiveActive == nullptr ?
+                                 false : current->uid == latestSensitiveActive->uid;
+
+        // By default allow capture if:
+        //     The assistant is not on TOP
+        //     AND there is no active privacy sensitive capture or call
+        //             OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+        bool allowCapture = !isAssistantOnTop
+                && !(isSensitiveActive && !(isLatestSensitive || current->canCaptureOutput))
+                && !(isInCall && !current->canCaptureOutput);
 
         if (isVirtualSource(source)) {
-            forceIdle = false;
+            // Allow capture for virtual (remote submix, call audio TX or RX...) sources
+            allowCapture = true;
         } else if (mUidPolicy->isAssistantUid(current->uid)) {
+            // For assistant allow capture if:
+            //     An accessibility service is on TOP
+            //            AND the source is VOICE_RECOGNITION or HOTWORD
+            //     OR is on TOP OR latest started AND uses VOICE_RECOGNITION
+            //            OR uses HOTWORD
+            //         AND there is no active privacy sensitive capture or call
+            //             OR client has CAPTURE_AUDIO_OUTPUT privileged permission
             if (isA11yOnTop) {
                 if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
-                    forceIdle = false;
+                    allowCapture = true;
                 }
             } else {
-                if ((((isOnTop || isLatest) && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
-                     source == AUDIO_SOURCE_HOTWORD) && !isSensitiveActive) {
-                    forceIdle = false;
+                if (((isTopOrLatestActive && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+                        source == AUDIO_SOURCE_HOTWORD) &&
+                        (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
+                    allowCapture = true;
                 }
             }
         } else if (mUidPolicy->isA11yUid(current->uid)) {
-            if ((isOnTop || isLatest) &&
-                (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
-                forceIdle = false;
-            }
-        } else {
-            if (!isAssistantOnTop && (isOnTop || isLatest) &&
-                (!isSensitiveActive || isLatestSensitive)) {
-                forceIdle = false;
+            // For an accessibility service, allow capture if:
+            //     It is on TOP or latest started
+            //     AND the source is VOICE_RECOGNITION or HOTWORD
+            if (isTopOrLatestActive &&
+                    (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
+                allowCapture = true;
             }
         }
         setAppState_l(current->uid,
-                      forceIdle ? APP_STATE_IDLE :
-                                  apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
+                      allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(current->uid)) :
+                                APP_STATE_IDLE);
+        if (allowCapture) {
+            enabledUids.push_back(current->uid);
+        }
     }
 }
 
 void AudioPolicyService::silenceAllRecordings_l() {
     for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
-        setAppState_l(current->uid, APP_STATE_IDLE);
+        if (!isVirtualSource(current->attributes.source)) {
+            setAppState_l(current->uid, APP_STATE_IDLE);
+        }
     }
 }
 
 /* static */
 app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
-    switch (amState) {
-    case ActivityManager::PROCESS_STATE_UNKNOWN:
+
+    if (amState == ActivityManager::PROCESS_STATE_UNKNOWN) {
         return APP_STATE_IDLE;
-    case ActivityManager::PROCESS_STATE_TOP:
-        return APP_STATE_TOP;
-    default:
-        break;
+    } else if (amState <= ActivityManager::PROCESS_STATE_TOP) {
+        // include persistent services
+        return APP_STATE_TOP;
     }
     return APP_STATE_FOREGROUND;
 }
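
Note on the policy change above: the default capture rule can be read in isolation. The following is a minimal standalone C++ sketch (illustrative only, not part of the patch) that mirrors the boolean expression assigned to allowCapture, using stub flags in place of the real AudioRecordClient fields:

// Illustrative sketch of the default capture rule applied to non-virtual,
// non-assistant, non-accessibility clients. Stub types only; not the framework classes.
#include <iostream>

struct ClientSketch {
    bool canCaptureOutput;    // client holds the CAPTURE_AUDIO_OUTPUT privileged permission
    bool isLatestSensitive;   // client is the latest started privacy sensitive capture
};

bool allowCaptureByDefault(const ClientSketch& c, bool isAssistantOnTop,
                           bool isSensitiveActive, bool isInCall) {
    // Mirrors: !isAssistantOnTop
    //          && !(isSensitiveActive && !(isLatestSensitive || canCaptureOutput))
    //          && !(isInCall && !canCaptureOutput)
    return !isAssistantOnTop
            && !(isSensitiveActive && !(c.isLatestSensitive || c.canCaptureOutput))
            && !(isInCall && !c.canCaptureOutput);
}

int main() {
    ClientSketch ordinary{false, false};
    ClientSketch privileged{true, false};
    std::cout << allowCaptureByDefault(ordinary, false, false, true) << '\n';   // 0: silenced during a call
    std::cout << allowCaptureByDefault(privileged, false, false, true) << '\n'; // 1: privileged client keeps capturing
    return 0;
}
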
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a2e75cd..160f70f 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -753,13 +753,17 @@
                 AudioRecordClient(const audio_attributes_t attributes,
                           const audio_io_handle_t io, uid_t uid, pid_t pid,
                           const audio_session_t session, const audio_port_handle_t deviceId,
-                          const String16& opPackageName) :
+                          const String16& opPackageName,
+                          bool canCaptureOutput, bool canCaptureHotword) :
                     AudioClient(attributes, io, uid, pid, session, deviceId),
-                    opPackageName(opPackageName), startTimeNs(0) {}
+                    opPackageName(opPackageName), startTimeNs(0),
+                    canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
                 ~AudioRecordClient() override = default;
 
         const String16 opPackageName;        // client package name
         nsecs_t startTimeNs;
+        const bool canCaptureOutput;
+        const bool canCaptureHotword;
     };
 
     // --- AudioPlaybackClient ---
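
Note on the new AudioRecordClient fields above: a minimal standalone sketch of the extended constructor shape, with stub types (std::string in place of String16, long long in place of nsecs_t) and the AudioClient base fields omitted; it only illustrates that the two capture permissions are fixed once at construction time. Illustrative only, not part of the patch.

#include <string>
#include <utility>

struct AudioRecordClientSketch {
    AudioRecordClientSketch(std::string opPackageName,
                            bool canCaptureOutput, bool canCaptureHotword)
        : opPackageName(std::move(opPackageName)), startTimeNs(0),
          canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}

    const std::string opPackageName;  // client package name
    long long startTimeNs;
    const bool canCaptureOutput;      // immutable for the lifetime of the client
    const bool canCaptureHotword;
};

int main() {
    // Hypothetical values; in the service they would come from the caller's
    // permission checks when the record client is registered.
    AudioRecordClientSketch client("com.example.recorder",
                                   /*canCaptureOutput=*/false,
                                   /*canCaptureHotword=*/false);
    return (client.canCaptureOutput || client.canCaptureHotword) ? 1 : 0;
}
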
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 0571741..12ff130 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -321,7 +321,7 @@
     // so. As documented in hardware/camera3.h:configure_streams().
     if (mState == STATE_IN_RECONFIG &&
             mOldUsage == mUsage &&
-            mOldMaxBuffers == camera3_stream::max_buffers) {
+            mOldMaxBuffers == camera3_stream::max_buffers && !mDataSpaceOverridden) {
         mState = STATE_CONFIGURED;
         return OK;
     }
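
Note on the Camera3Stream change above: the fast path may now be taken only when usage, max buffer count, and the data space are all unchanged across the reconfiguration. Below is a standalone sketch of that predicate, with hypothetical parameter names standing in for the member fields; illustrative only, not part of the patch.

#include <cstdint>

bool canSkipRegistration(bool inReconfig,
                         uint64_t oldUsage, uint64_t usage,
                         uint32_t oldMaxBuffers, uint32_t maxBuffers,
                         bool dataSpaceOverridden) {
    return inReconfig
            && oldUsage == usage
            && oldMaxBuffers == maxBuffers
            && !dataSpaceOverridden;
}

int main() {
    // A stream whose data space was overridden must be re-registered even if
    // usage and buffer count are identical: the call below returns false.
    return canSkipRegistration(true, 0x100, 0x100, 4, 4, /*dataSpaceOverridden=*/true) ? 1 : 0;
}
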
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index a87812b..c2ed23a 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -97,6 +97,21 @@
     return outputConfiguration;
 }
 
+hardware::camera2::params::SessionConfiguration convertFromHidl(
+    const HSessionConfiguration &hSessionConfiguration) {
+    hardware::camera2::params::SessionConfiguration sessionConfig(
+            hSessionConfiguration.inputWidth, hSessionConfiguration.inputHeight,
+            hSessionConfiguration.inputFormat,
+            static_cast<int>(hSessionConfiguration.operationMode));
+
+    for (const auto& hConfig : hSessionConfiguration.outputStreams) {
+        hardware::camera2::params::OutputConfiguration config = convertFromHidl(hConfig);
+        sessionConfig.addOutputConfiguration(config);
+    }
+
+    return sessionConfig;
+}
+
 // The camera metadata here is cloned. Since we're reading metadata over
 // hwbinder we would need to clone it in order to avoid alignment issues.
 bool convertFromHidl(const HCameraMetadata &src, CameraMetadata *dst) {
diff --git a/services/camera/libcameraservice/hidl/Convert.h b/services/camera/libcameraservice/hidl/Convert.h
index 82937a3..79683f6 100644
--- a/services/camera/libcameraservice/hidl/Convert.h
+++ b/services/camera/libcameraservice/hidl/Convert.h
@@ -53,6 +53,7 @@
 using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
 using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
 using HPhysicalCaptureResultInfo = frameworks::cameraservice::device::V2_0::PhysicalCaptureResultInfo;
+using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
 using HSubmitInfo = frameworks::cameraservice::device::V2_0::SubmitInfo;
 using HStatus = frameworks::cameraservice::common::V2_0::Status;
 using HStreamConfigurationMode = frameworks::cameraservice::device::V2_0::StreamConfigurationMode;
@@ -70,6 +71,9 @@
 hardware::camera2::params::OutputConfiguration convertFromHidl(
     const HOutputConfiguration &hOutputConfiguration);
 
+hardware::camera2::params::SessionConfiguration convertFromHidl(
+    const HSessionConfiguration &hSessionConfiguration);
+
 HCameraDeviceStatus convertToHidlCameraDeviceStatus(int32_t status);
 
 void convertToHidl(const std::vector<hardware::CameraStatus> &src,
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
index d22ba5a..675ad24 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
@@ -41,6 +41,7 @@
 using hardware::Void;
 using HSubmitInfo = device::V2_0::SubmitInfo;
 using hardware::camera2::params::OutputConfiguration;
+using hardware::camera2::params::SessionConfiguration;
 
 static constexpr int32_t CAMERA_REQUEST_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
 static constexpr int32_t CAMERA_RESULT_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
@@ -255,6 +256,18 @@
     return B2HStatus(ret);
 }
 
+Return<void> HidlCameraDeviceUser::isSessionConfigurationSupported(
+    const HSessionConfiguration& hSessionConfiguration,
+    isSessionConfigurationSupported_cb _hidl_cb) {
+    bool supported = false;
+    SessionConfiguration sessionConfiguration = convertFromHidl(hSessionConfiguration);
+    binder::Status ret = mDeviceRemote->isSessionConfigurationSupported(
+            sessionConfiguration, &supported);
+    HStatus status = B2HStatus(ret);
+    _hidl_cb(status, supported);
+    return Void();
+}
+
 } // implementation
 } // V2_0
 } // device
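
Note on the new HIDL entry point above: the result is delivered through the generated completion callback (_hidl_cb) rather than a return value. Below is a standalone sketch of that callback-return pattern, with stand-in types (a local status enum and std::function) instead of the generated HIDL ones; illustrative only, not part of the patch.

#include <functional>
#include <iostream>

enum class StatusSketch { NO_ERROR, INVALID_OPERATION };

using isSupported_cb = std::function<void(StatusSketch, bool)>;

// Plays the role of HidlCameraDeviceUser::isSessionConfigurationSupported():
// query the lower layer, then hand (status, supported) to the callback.
void isSessionConfigurationSupportedSketch(bool serviceSaysSupported, isSupported_cb cb) {
    cb(StatusSketch::NO_ERROR, serviceSaysSupported);
}

int main() {
    isSessionConfigurationSupportedSketch(true, [](StatusSketch status, bool supported) {
        if (status == StatusSketch::NO_ERROR && supported) {
            std::cout << "session configuration supported\n";
        }
    });
    return 0;
}
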
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
index be8f1d6..c3a80fe 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
@@ -53,6 +53,7 @@
 using HCameraDeviceUser = device::V2_0::ICameraDeviceUser;
 using HCameraMetadata = cameraservice::service::V2_0::CameraMetadata;
 using HCaptureRequest = device::V2_0::CaptureRequest;
+using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
 using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
 using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
 using HStatus = frameworks::cameraservice::common::V2_0::Status;
@@ -97,6 +98,10 @@
     virtual Return<HStatus> updateOutputConfiguration(
         int32_t streamId, const HOutputConfiguration& outputConfiguration) override;
 
+    virtual Return<void> isSessionConfigurationSupported(
+        const HSessionConfiguration& sessionConfiguration,
+        isSessionConfigurationSupported_cb _hidl_cb) override;
+
     bool initStatus() { return mInitSuccess; }
 
     std::shared_ptr<CaptureResultMetadataQueue> getCaptureResultMetadataQueue() {