Merge "Use bufferpool@2.0"
diff --git a/include/media/VolumeShaper.h b/include/media/VolumeShaper.h
index a3aaece..79afd6c 100644
--- a/include/media/VolumeShaper.h
+++ b/include/media/VolumeShaper.h
@@ -551,7 +551,7 @@
 
     static int64_t convertTimespecToUs(const struct timespec &tv)
     {
-        return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
+        return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
     }
 
     // current monotonic time in microseconds.
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 7990ee5..50b4d20 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -489,6 +489,13 @@
     }
 
     ALOGV("start processing frame #%" PRIu64, work->input.ordinal.frameIndex.peeku());
+    // If the input buffer list is not empty, there is some input to process.
+    // However, the input could be a null buffer. In that case, clear the buffer
+    // list before calling process().
+    if (!work->input.buffers.empty() && !work->input.buffers[0]) {
+        ALOGD("Encountered null input buffer. Clearing the input buffer list");
+        work->input.buffers.clear();
+    }
     process(work, mOutputBlockPool);
     ALOGV("processed frame #%" PRIu64, work->input.ordinal.frameIndex.peeku());
     {
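
A minimal standalone sketch of the guard added above: a work item can arrive with a non-empty buffer list whose only entry is null (for example an end-of-stream marker with no payload), and the list is cleared before the work is handed to process(). The types below are illustrative stand-ins, not the Codec2 classes.

    #include <memory>
    #include <vector>

    struct Buffer {};  // stand-in for C2Buffer

    // Drop a "present but null" entry so downstream code sees an empty list
    // instead of dereferencing a null buffer.
    void sanitizeInput(std::vector<std::shared_ptr<Buffer>> &buffers) {
        if (!buffers.empty() && !buffers[0]) {
            buffers.clear();
        }
    }
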
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 8d2a652..5c83481 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -83,6 +83,18 @@
                 DefineParam(mInputMaxBufSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
                 .withConstValue(new C2StreamMaxBufferSizeInfo::input(0u, 64 * 1024))
                 .build());
+
+        addParameter(
+                DefineParam(mPcmEncodingInfo, C2_PARAMKEY_PCM_ENCODING)
+                .withDefault(new C2StreamPcmEncodingInfo::output(0u, C2Config::PCM_16))
+                .withFields({C2F(mPcmEncodingInfo, value).oneOf({
+                     C2Config::PCM_16,
+                     C2Config::PCM_8,
+                     C2Config::PCM_FLOAT})
+                })
+                .withSetter((Setter<decltype(*mPcmEncodingInfo)>::StrictValueWithNoDeps))
+                .build());
+
     }
 
 private:
@@ -94,6 +106,7 @@
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
     std::shared_ptr<C2BitrateTuning::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
+    std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
 };
 
 C2SoftRawDec::C2SoftRawDec(
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 01de681..8ecbf5d 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -97,6 +97,26 @@
                 .withSetter(ProfileLevelSetter, mSize)
                 .build());
 
+        mHdr10PlusInfoInput = C2StreamHdr10PlusInfo::input::AllocShared(0);
+        addParameter(
+                DefineParam(mHdr10PlusInfoInput, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO)
+                .withDefault(mHdr10PlusInfoInput)
+                .withFields({
+                    C2F(mHdr10PlusInfoInput, m.value).any(),
+                })
+                .withSetter(Hdr10PlusInfoInputSetter)
+                .build());
+
+        mHdr10PlusInfoOutput = C2StreamHdr10PlusInfo::output::AllocShared(0);
+        addParameter(
+                DefineParam(mHdr10PlusInfoOutput, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO)
+                .withDefault(mHdr10PlusInfoOutput)
+                .withFields({
+                    C2F(mHdr10PlusInfoOutput, m.value).any(),
+                })
+                .withSetter(Hdr10PlusInfoOutputSetter)
+                .build());
+
 #if 0
         // sample BT.2020 static info
         mHdrStaticInfo = std::make_shared<C2StreamHdrStaticInfo::output>();
@@ -217,6 +237,18 @@
         return C2R::Ok();
     }
 
+    static C2R Hdr10PlusInfoInputSetter(bool mayBlock, C2P<C2StreamHdr10PlusInfo::input> &me) {
+        (void)mayBlock;
+        (void)me;  // TODO: validate
+        return C2R::Ok();
+    }
+
+    static C2R Hdr10PlusInfoOutputSetter(bool mayBlock, C2P<C2StreamHdr10PlusInfo::output> &me) {
+        (void)mayBlock;
+        (void)me;  // TODO: validate
+        return C2R::Ok();
+    }
+
 private:
     std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
     std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
@@ -228,6 +260,8 @@
 #if 0
     std::shared_ptr<C2StreamHdrStaticInfo::output> mHdrStaticInfo;
 #endif
+    std::shared_ptr<C2StreamHdr10PlusInfo::input> mHdr10PlusInfoInput;
+    std::shared_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfoOutput;
 #endif
 };
 
@@ -370,7 +404,8 @@
                            const std::shared_ptr<C2GraphicBlock> &block) {
     std::shared_ptr<C2Buffer> buffer = createGraphicBuffer(block,
                                                            C2Rect(mWidth, mHeight));
-    auto fillWork = [buffer, index](const std::unique_ptr<C2Work> &work) {
+    auto fillWork = [buffer, index, intf = this->mIntf](
+            const std::unique_ptr<C2Work> &work) {
         uint32_t flags = 0;
         if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
                 (c2_cntr64_t(index) == work->input.ordinal.frameIndex)) {
@@ -382,6 +417,28 @@
         work->worklets.front()->output.buffers.push_back(buffer);
         work->worklets.front()->output.ordinal = work->input.ordinal;
         work->workletsProcessed = 1u;
+
+        for (const std::unique_ptr<C2Param> &param: work->input.configUpdate) {
+            if (param) {
+                C2StreamHdr10PlusInfo::input *hdr10PlusInfo =
+                        C2StreamHdr10PlusInfo::input::From(param.get());
+
+                if (hdr10PlusInfo != nullptr) {
+                    std::vector<std::unique_ptr<C2SettingResult>> failures;
+                    std::unique_ptr<C2Param> outParam = C2Param::CopyAsStream(
+                            *param.get(), true /*output*/, param->stream());
+                    c2_status_t err = intf->config(
+                            { outParam.get() }, C2_MAY_BLOCK, &failures);
+                    if (err == C2_OK) {
+                        work->worklets.front()->output.configUpdate.push_back(
+                                C2Param::Copy(*outParam.get()));
+                    } else {
+                        ALOGE("finishWork: Config update size failed");
+                    }
+                    break;
+                }
+            }
+        }
     };
     if (work && c2_cntr64_t(index) == work->input.ordinal.frameIndex) {
         fillWork(work);
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 799ade4..27aa064 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -194,6 +194,7 @@
     kParamIndexLayerIndex,
     kParamIndexLayerCount,
     kParamIndexIntraRefresh,
+    kParamIndexHdr10PlusMetadata,
 
     /* ------------------------------------ image components ------------------------------------ */
 
@@ -1560,6 +1561,14 @@
         C2StreamHdrStaticInfo;
 constexpr char C2_PARAMKEY_HDR_STATIC_INFO[] = "raw.hdr-static-info";
 
+/**
+ * HDR10+ Metadata Info.
+ */
+typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexHdr10PlusMetadata>
+        C2StreamHdr10PlusInfo;
+constexpr char C2_PARAMKEY_INPUT_HDR10_PLUS_INFO[] = "input.hdr10-plus-info";
+constexpr char C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO[] = "output.hdr10-plus-info";
+
 /* ------------------------------------ block-based coding ----------------------------------- */
 
 /**
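
C2StreamHdr10PlusInfo is declared as a flexible blob parameter, so a producer sizes it to the HDR10+ payload and copies the bytes into m.value. A hedged sketch of that usage, assuming the Codec2 headers are available; payload and payloadSize are hypothetical names:

    #include <cstdint>
    #include <cstring>
    #include <memory>

    std::shared_ptr<C2StreamHdr10PlusInfo::input> makeHdr10PlusParam(
            const uint8_t *payload, size_t payloadSize) {
        // AllocShared sizes the flexible blob (m.value) to hold payloadSize bytes.
        std::shared_ptr<C2StreamHdr10PlusInfo::input> info =
                C2StreamHdr10PlusInfo::input::AllocShared(payloadSize);
        std::memcpy(info->m.value, payload, payloadSize);
        return info;
    }
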
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index f903bbb..852d6d6 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1348,9 +1348,7 @@
 }
 
 void CCodec::signalSetParameters(const sp<AMessage> &params) {
-    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
-    msg->setMessage("params", params);
-    msg->post();
+    setParameters(params);
 }
 
 void CCodec::setParameters(const sp<AMessage> &params) {
@@ -1515,13 +1513,6 @@
             setInputSurface(surface);
             break;
         }
-        case kWhatSetParameters: {
-            setDeadline(now, 50ms, "setParameters");
-            sp<AMessage> params;
-            CHECK(msg->findMessage("params", &params));
-            setParameters(params);
-            break;
-        }
         case kWhatWorkDone: {
             std::unique_ptr<C2Work> work;
             size_t numDiscardedInputBuffers;
@@ -1594,6 +1585,7 @@
                     C2StreamColorAspectsInfo::output::PARAM_TYPE,
                     C2StreamDataSpaceInfo::output::PARAM_TYPE,
                     C2StreamHdrStaticInfo::output::PARAM_TYPE,
+                    C2StreamHdr10PlusInfo::output::PARAM_TYPE,
                     C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
                     C2StreamSurfaceScalingInfo::output::PARAM_TYPE
                 };
@@ -1677,7 +1669,7 @@
         deadline->set(std::chrono::steady_clock::now() + 3s, "eos");
     }
     // TODO: query and use input/pipeline/output delay combined
-    if (count >= 8) {
+    if (count >= 4) {
         CCodecWatchdog::getInstance()->watch(this);
         Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
         deadline->set(std::chrono::steady_clock::now() + 3s, "queue");
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 01b9c1e..55a97d8 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -787,8 +787,13 @@
     std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(
             size_t size) final {
         int32_t capacity = kLinearBufferSize;
-        (void)mFormat->findInt32(C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING, &capacity);
-
+        (void)mFormat->findInt32(KEY_MAX_INPUT_SIZE, &capacity);
+        if ((size_t)capacity > kMaxLinearBufferSize) {
+            ALOGD("client requested %d, capped to %zu", capacity, kMaxLinearBufferSize);
+            capacity = kMaxLinearBufferSize;
+        }
+        // TODO: proper max input size
+        // TODO: read usage from intf
         std::unique_ptr<InputBuffersArray> array(
                 new InputBuffersArray(mComponentName.c_str(), "1D-Input[N]"));
         array->setPool(mPool);
@@ -1807,17 +1812,29 @@
 
 status_t CCodecBufferChannel::renderOutputBuffer(
         const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+    ALOGV("[%s] renderOutputBuffer: %p", mName, buffer.get());
     std::shared_ptr<C2Buffer> c2Buffer;
+    bool released = false;
     {
         Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
         if (*buffers) {
-            (*buffers)->releaseBuffer(buffer, &c2Buffer);
+            released = (*buffers)->releaseBuffer(buffer, &c2Buffer);
         }
     }
+    // NOTE: some apps try to releaseOutputBuffer() with timestamp and/or render
+    //       set to true.
+    sendOutputBuffers();
+    // input buffer feeding may have been gated by pending output buffers
+    feedInputBufferIfAvailable();
     if (!c2Buffer) {
+        if (released) {
+            ALOGD("[%s] The app is calling releaseOutputBuffer() with "
+                  "timestamp or render=true with non-video buffers. Apps should "
+                  "call releaseOutputBuffer() with render=false for those.",
+                  mName);
+        }
         return INVALID_OPERATION;
     }
-    sendOutputBuffers();
 
 #if 0
     const std::vector<std::shared_ptr<const C2Info>> infoParams = c2Buffer->info();
@@ -1871,6 +1888,11 @@
         std::static_pointer_cast<const C2StreamHdrStaticInfo::output>(
                 c2Buffer->getInfo(C2StreamHdrStaticInfo::output::PARAM_TYPE));
 
+    // HDR10 plus info
+    std::shared_ptr<const C2StreamHdr10PlusInfo::output> hdr10PlusInfo =
+        std::static_pointer_cast<const C2StreamHdr10PlusInfo::output>(
+                c2Buffer->getInfo(C2StreamHdr10PlusInfo::output::PARAM_TYPE));
+
     {
         Mutexed<OutputSurface>::Locked output(mOutputSurface);
         if (output->surface == nullptr) {
@@ -1898,35 +1920,45 @@
             videoScalingMode,
             transform,
             Fence::NO_FENCE, 0);
-    if (hdrStaticInfo) {
-        struct android_smpte2086_metadata smpte2086_meta = {
-            .displayPrimaryRed = {
-                hdrStaticInfo->mastering.red.x, hdrStaticInfo->mastering.red.y
-            },
-            .displayPrimaryGreen = {
-                hdrStaticInfo->mastering.green.x, hdrStaticInfo->mastering.green.y
-            },
-            .displayPrimaryBlue = {
-                hdrStaticInfo->mastering.blue.x, hdrStaticInfo->mastering.blue.y
-            },
-            .whitePoint = {
-                hdrStaticInfo->mastering.white.x, hdrStaticInfo->mastering.white.y
-            },
-            .maxLuminance = hdrStaticInfo->mastering.maxLuminance,
-            .minLuminance = hdrStaticInfo->mastering.minLuminance,
-        };
-
-        struct android_cta861_3_metadata cta861_meta = {
-            .maxContentLightLevel = hdrStaticInfo->maxCll,
-            .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
-        };
-
+    if (hdrStaticInfo || hdr10PlusInfo) {
         HdrMetadata hdr;
-        hdr.validTypes = HdrMetadata::SMPTE2086 | HdrMetadata::CTA861_3;
-        hdr.smpte2086 = smpte2086_meta;
-        hdr.cta8613 = cta861_meta;
+        if (hdrStaticInfo) {
+            struct android_smpte2086_metadata smpte2086_meta = {
+                .displayPrimaryRed = {
+                    hdrStaticInfo->mastering.red.x, hdrStaticInfo->mastering.red.y
+                },
+                .displayPrimaryGreen = {
+                    hdrStaticInfo->mastering.green.x, hdrStaticInfo->mastering.green.y
+                },
+                .displayPrimaryBlue = {
+                    hdrStaticInfo->mastering.blue.x, hdrStaticInfo->mastering.blue.y
+                },
+                .whitePoint = {
+                    hdrStaticInfo->mastering.white.x, hdrStaticInfo->mastering.white.y
+                },
+                .maxLuminance = hdrStaticInfo->mastering.maxLuminance,
+                .minLuminance = hdrStaticInfo->mastering.minLuminance,
+            };
+
+            struct android_cta861_3_metadata cta861_meta = {
+                .maxContentLightLevel = hdrStaticInfo->maxCll,
+                .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
+            };
+
+            hdr.validTypes = HdrMetadata::SMPTE2086 | HdrMetadata::CTA861_3;
+            hdr.smpte2086 = smpte2086_meta;
+            hdr.cta8613 = cta861_meta;
+        }
+        if (hdr10PlusInfo) {
+            hdr.validTypes |= HdrMetadata::HDR10PLUS;
+            hdr.hdr10plus.assign(
+                    hdr10PlusInfo->m.value,
+                    hdr10PlusInfo->m.value + hdr10PlusInfo->flexCount());
+        }
         qbi.setHdrMetadata(hdr);
     }
+    // we don't have dirty regions
+    qbi.setSurfaceDamage(Region::INVALID_REGION);
     android::IGraphicBufferProducer::QueueBufferOutput qbo;
     status_t result = mComponent->queueToOutputSurface(block, qbi, &qbo);
     if (result != OK) {
@@ -1961,8 +1993,8 @@
         }
     }
     if (released) {
-        feedInputBufferIfAvailable();
         sendOutputBuffers();
+        feedInputBufferIfAvailable();
     } else {
         ALOGD("[%s] MediaCodec discarded an unknown buffer", mName);
     }
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 8dbfd0e..ef02e74 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -570,6 +570,12 @@
     add(ConfigMapper("csd-0",           C2_PARAMKEY_INIT_DATA,       "value")
         .limitTo(D::OUTPUT & D::READ));
 
+    add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO, "value")
+        .limitTo(D::VIDEO & D::PARAM & D::INPUT));
+
+    add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO, "value")
+        .limitTo(D::VIDEO & D::OUTPUT));
+
     add(ConfigMapper(C2_PARAMKEY_TEMPORAL_LAYERING, C2_PARAMKEY_TEMPORAL_LAYERING, "")
         .limitTo(D::ENCODER & D::VIDEO & D::OUTPUT));
 
@@ -624,7 +630,23 @@
         .limitTo(D::AUDIO & D::CODED));
 
     add(ConfigMapper(KEY_PCM_ENCODING,  C2_PARAMKEY_PCM_ENCODING,       "value")
-        .limitTo(D::AUDIO));
+        .limitTo(D::AUDIO)
+        .withMappers([](C2Value v) -> C2Value {
+            int32_t value;
+            C2Config::pcm_encoding_t to;
+            if (v.get(&value) && C2Mapper::map(value, &to)) {
+                return to;
+            }
+            return C2Value();
+        }, [](C2Value v) -> C2Value {
+            C2Config::pcm_encoding_t value;
+            int32_t to;
+            using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
+            if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
+                return to;
+            }
+            return C2Value();
+        }));
 
     add(ConfigMapper(KEY_IS_ADTS, C2_PARAMKEY_AAC_PACKAGING, "value")
         .limitTo(D::AUDIO & D::CODED)
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index bf6062e..1113ae8 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -109,9 +109,11 @@
 
 // DummyContainerBuffer
 
+static uint8_t sDummyByte[1] = { 0 };
+
 DummyContainerBuffer::DummyContainerBuffer(
         const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer)
-    : Codec2Buffer(format, new ABuffer(nullptr, 1)),
+    : Codec2Buffer(format, new ABuffer(sDummyByte, 1)),
       mBufferRef(buffer) {
     setRange(0, buffer ? 1 : 0);
 }
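
The static sDummyByte gives the placeholder ABuffer real backing storage, so code that touches data() on an (empty) dummy buffer never sees a null pointer. A small hedged sketch of the same pattern in isolation; ABuffer and sp are the stagefright/utils types, the wrapper function is illustrative:

    #include <cstdint>
    #include <media/stagefright/foundation/ABuffer.h>

    using android::ABuffer;
    using android::sp;

    // data() stays non-null even though the visible payload is empty.
    sp<ABuffer> makePlaceholderBuffer() {
        static uint8_t sPlaceholderByte[1] = { 0 };
        sp<ABuffer> placeholder = new ABuffer(sPlaceholderByte, sizeof(sPlaceholderByte));
        placeholder->setRange(0, 0);
        return placeholder;
    }
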
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index b7519da..84d22a3 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -88,16 +88,30 @@
 
         uint32_t planeW = img->mWidth / plane.colSampling;
         uint32_t planeH = img->mHeight / plane.rowSampling;
-        for (uint32_t row = 0; row < planeH; ++row) {
-            decltype(imgRow) imgPtr = imgRow;
-            decltype(viewRow) viewPtr = viewRow;
-            for (uint32_t col = 0; col < planeW; ++col) {
-                MemCopier<ToMediaImage, 0>::copy(imgPtr, viewPtr, bpp);
-                imgPtr += img->mPlane[i].mColInc;
-                viewPtr += plane.colInc;
+
+        bool canCopyByRow = (plane.colInc == 1) && (img->mPlane[i].mColInc == 1);
+        bool canCopyByPlane = canCopyByRow && (plane.rowInc == img->mPlane[i].mRowInc);
+        if (canCopyByPlane) {
+            MemCopier<ToMediaImage, 0>::copy(imgRow, viewRow, plane.rowInc * planeH);
+        } else if (canCopyByRow) {
+            for (uint32_t row = 0; row < planeH; ++row) {
+                MemCopier<ToMediaImage, 0>::copy(
+                        imgRow, viewRow, std::min(plane.rowInc, img->mPlane[i].mRowInc));
+                imgRow += img->mPlane[i].mRowInc;
+                viewRow += plane.rowInc;
             }
-            imgRow += img->mPlane[i].mRowInc;
-            viewRow += plane.rowInc;
+        } else {
+            for (uint32_t row = 0; row < planeH; ++row) {
+                decltype(imgRow) imgPtr = imgRow;
+                decltype(viewRow) viewPtr = viewRow;
+                for (uint32_t col = 0; col < planeW; ++col) {
+                    MemCopier<ToMediaImage, 0>::copy(imgPtr, viewPtr, bpp);
+                    imgPtr += img->mPlane[i].mColInc;
+                    viewPtr += plane.colInc;
+                }
+                imgRow += img->mPlane[i].mRowInc;
+                viewRow += plane.rowInc;
+            }
         }
     }
     return OK;
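
The copy loop above now picks the cheapest path the strides allow: one memcpy per plane when both layouts are packed and share the same row stride, one memcpy per row when pixels are contiguous but row strides differ, and the original per-sample loop otherwise. A self-contained sketch of that decision; the layout parameters are hypothetical, not the MediaImage2/C2PlanarLayout types, and strides are assumed positive:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    void copyPlane(uint8_t *dst, int32_t dstColInc, int32_t dstRowInc,
                   const uint8_t *src, int32_t srcColInc, int32_t srcRowInc,
                   uint32_t width, uint32_t height, size_t bpp) {
        const bool canCopyByRow = (srcColInc == 1) && (dstColInc == 1);
        const bool canCopyByPlane = canCopyByRow && (srcRowInc == dstRowInc);
        if (canCopyByPlane) {
            // Identical packing: one copy for the whole plane.
            std::memcpy(dst, src, (size_t)srcRowInc * height);
        } else if (canCopyByRow) {
            // Contiguous pixels per row, different row strides.
            for (uint32_t row = 0; row < height; ++row) {
                std::memcpy(dst, src, std::min(srcRowInc, dstRowInc));
                src += srcRowInc;
                dst += dstRowInc;
            }
        } else {
            // General case: sample-by-sample copy honoring both column strides.
            for (uint32_t row = 0; row < height; ++row) {
                const uint8_t *s = src;
                uint8_t *d = dst;
                for (uint32_t col = 0; col < width; ++col) {
                    std::memcpy(d, s, bpp);
                    s += srcColInc;
                    d += dstColInc;
                }
                src += srcRowInc;
                dst += dstRowInc;
            }
        }
    }
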
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 4bf1295..3a3d051 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -10,7 +10,6 @@
     shared_libs: [
         "libbinder_ndk",
         "liblog",
-        "libmediaextractor",
         "libmediandk",
     ],
 
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index 22b96e5..4e04605 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -586,9 +586,6 @@
 
 void FLACParser::releaseBuffers()
 {
-    CHECK(mGroup != NULL);
-    delete mGroup;
-    mGroup = NULL;
 }
 
 MediaBufferHelperV3 *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
index a95c2db..59a2e9b 100644
--- a/media/extractors/mp4/AC4Parser.cpp
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -310,13 +310,13 @@
                 pres_bytes += mBitReader.getBits(16);
             }
             ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
-            if (presentation_version > 1) {
+            if (presentation_version > 2) {
                 CHECK_BITS_LEFT(pres_bytes * 8);
                 mBitReader.skipBits(pres_bytes * 8);
                 continue;
             }
-            // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
-            // start with a presentation_config of 5 bits
+            // ac4_presentation_v0_dsi(), ac4_presentation_v1_dsi() and ac4_presentation_v2_dsi()
+            // all start with a presentation_config of 5 bits
             CHECK_BITS_LEFT(5);
             presentation_config = mBitReader.getBits(5);
             b_single_substream_group = (presentation_config == 0x1f);
@@ -363,7 +363,7 @@
             uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
             ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
                 dsi_frame_rate_multiply_info);
-            if (ac4_dsi_version == 1 && presentation_version == 1) {
+            if (ac4_dsi_version == 1 && (presentation_version == 1 || presentation_version == 2)) {
                 CHECK_BITS_LEFT(2);
                 uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
                 ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
@@ -386,7 +386,7 @@
                 ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
                     BOOLSTR(b_presentation_channel_coded));
                 if (b_presentation_channel_coded) {
-                    if (presentation_version == 1) {
+                    if (presentation_version == 1 || presentation_version == 2) {
                         CHECK_BITS_LEFT(5);
                         uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
                         mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
@@ -411,7 +411,7 @@
                     ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
                         presentation_channel_mask_v1);
                 }
-                if (presentation_version == 1) {
+                if (presentation_version == 1 || presentation_version == 2) {
                     CHECK_BITS_LEFT(1);
                     bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
                     ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 337ff2a..9fb2e35 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2733,8 +2733,7 @@
 
     // + 4-byte type
     offset += 4;
-    // at least for AC4 DSI v1 this is big enough
-    const uint32_t kAC4SpecificBoxPayloadSize = 256;
+    const uint32_t kAC4SpecificBoxPayloadSize = 1176;
     uint8_t chunk[kAC4SpecificBoxPayloadSize];
     ssize_t dsiSize = size - 8; // size of box - size and type fields
     if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize ||
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index 01acb2c..b28877d 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -7,6 +7,10 @@
         "external/tremolo",
     ],
 
+    header_libs: [
+        "libaudio_system_headers",
+    ],
+
     shared_libs: [
         "liblog",
         "libmediaextractor",
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index a52ccb1..cc2c792 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -34,6 +34,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <media/stagefright/MetaDataUtils.h>
+#include <system/audio.h>
 #include <utils/String8.h>
 
 extern "C" {
@@ -133,6 +134,8 @@
 
     Vector<TOCEntry> mTableOfContents;
 
+    int32_t mHapticChannelCount;
+
     ssize_t readPage(off64_t offset, Page *page);
     status_t findNextPage(off64_t startOffset, off64_t *pageOffset);
 
@@ -163,6 +166,8 @@
 
     void buildTableOfContents();
 
+    void setChannelMask(int channelCount);
+
     MyOggExtractor(const MyOggExtractor &);
     MyOggExtractor &operator=(const MyOggExtractor &);
 };
@@ -310,7 +315,8 @@
       mMimeType(mimeType),
       mNumHeaders(numHeaders),
       mSeekPreRollUs(seekPreRollUs),
-      mFirstDataOffset(-1) {
+      mFirstDataOffset(-1),
+      mHapticChannelCount(0) {
     mCurrentPage.mNumSegments = 0;
 
     vorbis_info_init(&mVi);
@@ -1083,6 +1089,7 @@
     }
 
     parseFileMetaData();
+    setChannelMask(mChannelCount);
     return AMEDIA_OK;
 }
 
@@ -1157,6 +1164,7 @@
             }
 
             parseFileMetaData();
+            setChannelMask(mVi.channels);
             break;
         }
 
@@ -1192,6 +1200,29 @@
         parseVorbisComment(mFileMeta, comment, commentLength);
         //ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
     }
+
+    AMediaFormat_getInt32(mFileMeta, "haptic", &mHapticChannelCount);
+}
+
+void MyOggExtractor::setChannelMask(int channelCount) {
+    // Set the channel mask according to the channel count. When a haptic channel count is
+    // found in the file metadata, include the haptic channel mask so haptic playback can be tried.
+    if (mHapticChannelCount > 0) {
+        const audio_channel_mask_t hapticChannelMask =
+                haptic_channel_mask_from_count(mHapticChannelCount);
+        const int32_t audioChannelCount = channelCount - mHapticChannelCount;
+        if (hapticChannelMask == AUDIO_CHANNEL_INVALID
+                || audioChannelCount <= 0 || audioChannelCount > FCC_8) {
+            ALOGE("Invalid haptic channel count found in metadata: %d", mHapticChannelCount);
+        } else {
+            const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
+                    audioChannelCount) | hapticChannelMask;
+            AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
+        }
+    } else {
+        AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK,
+                audio_channel_out_mask_from_count(channelCount));
+    }
 }
 
 
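
setChannelMask() splits the container's channel count into an audio part and a haptic part and publishes a combined mask. A hedged sketch of the same computation as a standalone helper; it assumes Android's <system/audio.h> (audio_channel_out_mask_from_count, haptic_channel_mask_from_count, AUDIO_CHANNEL_INVALID and FCC_8 are the platform's), while the wrapper itself is illustrative:

    #include <stdint.h>
    #include <system/audio.h>

    // Returns false when the haptic metadata is inconsistent; the caller would
    // then skip publishing a haptic-aware channel mask.
    bool makeChannelMask(int32_t totalChannels, int32_t hapticChannels,
                         audio_channel_mask_t *outMask) {
        if (hapticChannels <= 0) {
            *outMask = audio_channel_out_mask_from_count(totalChannels);
            return true;
        }
        const audio_channel_mask_t hapticMask =
                haptic_channel_mask_from_count(hapticChannels);
        const int32_t audioChannels = totalChannels - hapticChannels;
        if (hapticMask == AUDIO_CHANNEL_INVALID
                || audioChannels <= 0 || audioChannels > FCC_8) {
            return false;
        }
        *outMask = static_cast<audio_channel_mask_t>(
                audio_channel_out_mask_from_count(audioChannels) | hapticMask);
        return true;
    }
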
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index ec36ed7..3e91717 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -917,11 +917,11 @@
             config, flags, selectedDeviceId, portId);
 }
 
-status_t AudioSystem::startInput(audio_port_handle_t portId, bool *silenced)
+status_t AudioSystem::startInput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startInput(portId, silenced);
+    return aps->startInput(portId);
 }
 
 status_t AudioSystem::stopInput(audio_port_handle_t portId)
@@ -1315,6 +1315,13 @@
     return aps->setA11yServicesUids(uids);
 }
 
+bool AudioSystem::isHapticPlaybackSupported()
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+    return aps->isHapticPlaybackSupported();
+}
+
 
 // ---------------------------------------------------------------------------
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 02324ac..1f6dd60 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -30,9 +30,11 @@
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
 #include <media/IAudioFlinger.h>
+#include <media/IAudioPolicyService.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicyHelper.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/AudioSystem.h>
 #include <media/MediaAnalyticsItem.h>
 #include <media/TypeConverter.h>
 
@@ -64,7 +66,7 @@
 
 static int64_t convertTimespecToUs(const struct timespec &tv)
 {
-    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
+    return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
 }
 
 // TODO move to audio_utils.
@@ -157,6 +159,15 @@
     return NO_ERROR;
 }
 
+// static
+bool AudioTrack::isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) {
+    ALOGV("%s()", __FUNCTION__);
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+    return aps->isDirectOutputSupported(config, attributes);
+}
+
 // ---------------------------------------------------------------------------
 
 static std::string audioContentTypeString(audio_content_type_t value) {
@@ -465,16 +476,7 @@
                 __func__,
                  mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
         mStreamType = AUDIO_STREAM_DEFAULT;
-        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
-            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
-        }
-        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
-            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
-        }
-        // check deep buffer after flags have been modified above
-        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
-            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-        }
+        audio_attributes_flags_to_audio_output_flags(mAttributes.flags, flags);
     }
 
     // these below should probably come from the audioFlinger too...
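
A hedged usage sketch for the new AudioTrack::isDirectOutputSupported() query; the configuration values are only examples:

    #include <media/AudioTrack.h>
    #include <system/audio.h>

    bool canUseDirectPcmOutput() {
        audio_config_base_t config = {};
        config.sample_rate = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format = AUDIO_FORMAT_PCM_16_BIT;

        audio_attributes_t attributes = {};
        attributes.usage = AUDIO_USAGE_MEDIA;
        attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;

        // true if the audio policy manager reports a direct output path for
        // this configuration/attributes combination.
        return android::AudioTrack::isDirectOutputSupported(config, attributes);
    }
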
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index a406658..0ce8b16 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -62,6 +62,7 @@
     SET_EFFECT_ENABLED,
     IS_STREAM_ACTIVE_REMOTELY,
     IS_OFFLOAD_SUPPORTED,
+    IS_DIRECT_OUTPUT_SUPPORTED,
     LIST_AUDIO_PORTS,
     GET_AUDIO_PORT,
     CREATE_AUDIO_PATCH,
@@ -88,6 +89,7 @@
     REMOVE_SOURCE_DEFAULT_EFFECT,
     SET_ASSISTANT_UID,
     SET_A11Y_SERVICES_UIDS,
+    IS_HAPTIC_PLAYBACK_SUPPORTED,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -329,16 +331,13 @@
         return NO_ERROR;
     }
 
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool *silenced)
+    virtual status_t startInput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(portId);
-        data.writeInt32(*silenced ? 1 : 0);
         remote()->transact(START_INPUT, data, &reply);
         status_t status = static_cast <status_t> (reply.readInt32());
-        *silenced = reply.readInt32() == 1;
         return status;
     }
 
@@ -526,6 +525,16 @@
         return reply.readInt32();
     }
 
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(&config, sizeof(audio_config_base_t));
+        data.write(&attributes, sizeof(audio_attributes_t));
+        status_t status = remote()->transact(IS_DIRECT_OUTPUT_SUPPORTED, data, &reply);
+        return status == NO_ERROR ? static_cast<bool>(reply.readInt32()) : false;
+    }
+
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
                                     unsigned int *num_ports,
@@ -970,6 +979,17 @@
         return static_cast <status_t> (reply.readInt32());
     }
 
+    virtual bool isHapticPlaybackSupported()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = remote()->transact(IS_HAPTIC_PLAYBACK_SUPPORTED, data, &reply);
+        if (status != NO_ERROR) {
+            return false;
+        }
+        return reply.readBool();
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1219,10 +1239,8 @@
         case START_INPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
-            bool silenced = data.readInt32() == 1;
-            status_t status = startInput(portId, &silenced);
+            status_t status = startInput(portId);
             reply->writeInt32(static_cast <uint32_t>(status));
-            reply->writeInt32(silenced ? 1 : 0);
             return NO_ERROR;
         } break;
 
@@ -1393,6 +1411,18 @@
             return NO_ERROR;
         }
 
+        case IS_DIRECT_OUTPUT_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_config_base_t config = {};
+            audio_attributes_t attributes = {};
+            status_t status = data.read(&config, sizeof(audio_config_base_t));
+            if (status != NO_ERROR) return status;
+            status = data.read(&attributes, sizeof(audio_attributes_t));
+            if (status != NO_ERROR) return status;
+            reply->writeInt32(isDirectOutputSupported(config, attributes));
+            return NO_ERROR;
+        }
+
         case LIST_AUDIO_PORTS: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_port_role_t role = (audio_port_role_t)data.readInt32();
@@ -1777,6 +1807,13 @@
             return NO_ERROR;
         }
 
+        case IS_HAPTIC_PLAYBACK_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            bool isSupported = isHapticPlaybackSupported();
+            reply->writeBool(isSupported);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
index 49432b7..46de6b3 100644
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -123,4 +123,21 @@
     }
 }
 
+// Convert the flags returned by the Java AudioAttributes.getFlags() method to audio_output_flags_t
+static inline
+void audio_attributes_flags_to_audio_output_flags(const audio_flags_mask_t audioAttributeFlags,
+            audio_output_flags_t &flags) {
+    if ((audioAttributeFlags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+        flags = static_cast<audio_output_flags_t>(flags |
+            AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
+    if ((audioAttributeFlags & AUDIO_FLAG_LOW_LATENCY) != 0) {
+        flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_FAST);
+    }
+    // check deep buffer after flags have been modified above
+    if (flags == AUDIO_OUTPUT_FLAG_NONE && (audioAttributeFlags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
+        flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+    }
+}
+
 #endif //AUDIO_POLICY_HELPER_H_
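
A hedged example of the helper above; the attribute flags are real AOSP constants, the scenario is illustrative:

    #include <media/AudioPolicyHelper.h>

    audio_output_flags_t exampleOutputFlags() {
        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
        const audio_flags_mask_t attrFlags = static_cast<audio_flags_mask_t>(
                AUDIO_FLAG_HW_AV_SYNC | AUDIO_FLAG_LOW_LATENCY);
        audio_attributes_flags_to_audio_output_flags(attrFlags, flags);
        // flags is now HW_AV_SYNC | DIRECT | FAST; DEEP_BUFFER is only chosen
        // when no other flag is requested and AUDIO_FLAG_DEEP_BUFFER is set.
        return flags;
    }
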
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 76a79c9..74156ca 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -241,8 +241,7 @@
                                     audio_port_handle_t *selectedDeviceId,
                                     audio_port_handle_t *portId);
 
-    static status_t startInput(audio_port_handle_t portId,
-                               bool *silenced);
+    static status_t startInput(audio_port_handle_t portId);
     static status_t stopInput(audio_port_handle_t portId);
     static void releaseInput(audio_port_handle_t portId);
     static status_t initStreamVolume(audio_stream_type_t stream,
@@ -346,6 +345,8 @@
     static status_t setAssistantUid(uid_t uid);
     static status_t setA11yServicesUids(const std::vector<uid_t>& uids);
 
+    static bool     isHapticPlaybackSupported();
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 8238ea2..7fdf7cc 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -147,6 +147,12 @@
                                      audio_stream_type_t streamType,
                                      uint32_t sampleRate);
 
+    /* Check whether direct playback is possible for the given audio configuration and attributes.
+     * Returns true if direct output is possible for the given parameters, false otherwise.
+     */
+    static bool isDirectOutputSupported(const audio_config_base_t& config,
+                                        const audio_attributes_t& attributes);
+
     /* How data is transferred to AudioTrack
      */
     enum transfer_type {
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index a246df6..61f3b27 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -78,8 +78,7 @@
                               audio_input_flags_t flags,
                               audio_port_handle_t *selectedDeviceId,
                               audio_port_handle_t *portId) = 0;
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool *silenced) = 0;
+    virtual status_t startInput(audio_port_handle_t portId) = 0;
     virtual status_t stopInput(audio_port_handle_t portId) = 0;
     virtual void releaseInput(audio_port_handle_t portId) = 0;
     virtual status_t initStreamVolume(audio_stream_type_t stream,
@@ -126,6 +125,10 @@
     // bit rate, duration, video and streaming or offload property is enabled
     virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
 
+    // Check if direct playback is possible for the given audio configuration and attributes.
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) = 0;
+
     /* List available audio ports and their attributes */
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
@@ -182,6 +185,8 @@
 
     virtual status_t setAssistantUid(uid_t uid) = 0;
     virtual status_t setA11yServicesUids(const std::vector<uid_t>& uids) = 0;
+
+    virtual bool     isHapticPlaybackSupported() = 0;
 };
 
 
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 5c57c43..7a32d3f 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -129,11 +129,14 @@
         "Common/lib",
         "Bundle/lib",
     ],
-
+    shared_libs: [
+        "liblog",
+    ],
     cflags: [
         "-fvisibility=hidden",
         "-DBUILD_FLOAT",
         "-DHIGHER_FS",
+        "-DSUPPORT_MC",
 
         "-Wall",
         "-Werror",
diff --git a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
index 4c2b954..a1fa79a 100644
--- a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
+++ b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
@@ -256,6 +256,9 @@
     LVDBE_Volume_en         VolumeControl;
     LVM_INT16               VolumedB;
     LVM_INT16               HeadroomdB;
+#ifdef SUPPORT_MC
+    LVM_INT16               NrChannels;
+#endif
 
 } LVDBE_Params_t;
 
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c
index fd4016b..0ba2c86 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.c
@@ -118,7 +118,7 @@
      * Calculate the table offsets
      */
     LVM_UINT16 Offset = (LVM_UINT16)((LVM_UINT16)pParams->SampleRate + \
-                                    (LVM_UINT16)(pParams->CentreFrequency * (1+LVDBE_FS_48000)));    
+                                    (LVM_UINT16)(pParams->CentreFrequency * (1+LVDBE_FS_48000)));
 #endif
 
     /*
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c
index 3fff2a2..2946734 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.c
@@ -95,7 +95,7 @@
 #ifdef BUILD_FLOAT
         pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size   = sizeof(LVDBE_Coef_FLOAT_t);
 #else
-        pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size         = sizeof(LVDBE_Coef_t);   
+        pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size         = sizeof(LVDBE_Coef_t);
 #endif
         pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Alignment    = LVDBE_PERSISTENT_COEF_ALIGN;
         pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Type         = LVDBE_PERSISTENT_COEF;
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
index 4e5207f..4225a30 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
@@ -64,7 +64,12 @@
 #define LVDBE_PERSISTENT_COEF_ALIGN      4       /* 32-bit alignment for coef */
 #define LVDBE_SCRATCH_ALIGN              4       /* 32-bit alignment for long data */
 
+#ifdef SUPPORT_MC
+/* Number of buffers required for inplace processing */
+#define LVDBE_SCRATCHBUFFERS_INPLACE     (LVM_MAX_CHANNELS * 3)
+#else
 #define LVDBE_SCRATCHBUFFERS_INPLACE     6       /* Number of buffers required for inplace processing */
+#endif
 
 #define LVDBE_MIXER_TC                   5       /* Mixer time  */
 #define LVDBE_BYPASS_MIXER_TC            100     /* Bypass mixer time */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c
index 10ea700..c4d3403 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.c
@@ -21,11 +21,13 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#include <string.h> // memset
 #include "LVDBE.h"
 #include "LVDBE_Private.h"
 #include "VectorArithmetic.h"
 #include "AGC.h"
 #include "LVDBE_Coeffs.h"               /* Filter coefficients */
+#include <log/log.h>
 
 /********************************************************************************************/
 /*                                                                                          */
@@ -187,42 +189,49 @@
 LVDBE_ReturnStatus_en LVDBE_Process(LVDBE_Handle_t hInstance,
     const LVM_FLOAT *pInData,
     LVM_FLOAT *pOutData,
-    LVM_UINT16 NumSamples)
+    const LVM_UINT16 NrFrames) // updated to use samples = frames * channels.
 {
-
   LVDBE_Instance_t *pInstance =(LVDBE_Instance_t *)hInstance;
-  LVM_FLOAT *pScratch_in = (LVM_FLOAT *)pInstance->MemoryTable.Region
-  [LVDBE_MEMREGION_SCRATCH].pBaseAddress;
-  LVM_FLOAT *pScratch = pScratch_in + 2 * NumSamples;
-  LVM_FLOAT *pMono;
-  LVM_INT32 ii = 0;
 
-  /* Scratch for Volume Control starts at offset of 4*NumSamples float values from pScratch */
-  LVM_FLOAT           *pScratchVol = (LVM_FLOAT *)(&pScratch_in[4 * NumSamples]);
-//  LVM_INT16 *pScratchVol_int = (LVM_INT16 *)(pScratchVol);
+  /* Extract the number of channels */
+#ifdef SUPPORT_MC
+  // Mono passed in as stereo
+  const LVM_INT32 NrChannels = pInstance->Params.NrChannels == 1
+      ? 2 : pInstance->Params.NrChannels;
+#else
+  const LVM_INT32 NrChannels = 2; // FCC_2
+#endif
+  const LVM_INT32 NrSamples = NrChannels * NrFrames;
 
-  /* Scratch for Mono path starts at offset of 6*NumSamples 32-bit values from pScratch */
-  pMono = &pScratch_in[4 * NumSamples];
+  /* Space to store DBE path computation */
+  LVM_FLOAT * const pScratch =
+          (LVM_FLOAT *)pInstance->MemoryTable.Region[LVDBE_MEMREGION_SCRATCH].pBaseAddress;
 
   /*
-   * Check the number of samples is not too large
+   * Scratch for Mono path starts at offset of
+   * NrSamples float values from pScratch.
    */
-  if (NumSamples > pInstance->Capabilities.MaxBlockSize)
+  LVM_FLOAT * const pMono = pScratch + NrSamples;
+
+  /*
+   * TRICKY: pMono is used and discarded by the DBE path.
+   *         so it is available for use for the pScratchVol
+   *         path which is computed afterwards.
+   *
+   * Space to store Volume Control path computation.
+   * This is identical to pMono (see TRICKY comment).
+   */
+  LVM_FLOAT * const pScratchVol = pMono;
+
+  /*
+   * Check the number of frames is not too large
+   */
+  if (NrFrames > pInstance->Capabilities.MaxBlockSize)
   {
-    return(LVDBE_TOOMANYSAMPLES);
+    return LVDBE_TOOMANYSAMPLES;
   }
 
   /*
-   * Convert 16-bit samples to Float
-   */
-  Copy_Float(pInData, /* Source 16-bit data    */
-      pScratch_in, /* Dest. 32-bit data     */
-      (LVM_INT16)(2 * NumSamples)); /* Left and right        */
-
-  for (ii = 0; ii < 2 * NumSamples; ii++) {
-    pScratch[ii] = pScratch_in[ii];
-  }
-  /*
    * Check if the algorithm is enabled
    */
   /* DBE path is processed when DBE is ON or during On/Off transitions */
@@ -230,50 +239,81 @@
       (LVC_Mixer_GetCurrent(&pInstance->pData->BypassMixer.MixerStream[0])
           !=LVC_Mixer_GetTarget(&pInstance->pData->BypassMixer.MixerStream[0])))
   {
+    // make copy of input data
+    Copy_Float(pInData,
+        pScratch,
+        (LVM_INT16)NrSamples);
 
     /*
      * Apply the high pass filter if selected
      */
     if (pInstance->Params.HPFSelect == LVDBE_HPF_ON)
     {
+#ifdef SUPPORT_MC
+      BQ_MC_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance, /* Filter instance      */
+          pScratch, /* Source               */
+          pScratch, /* Destination          */
+          (LVM_INT16)NrFrames,
+          (LVM_INT16)NrChannels);
+#else
       BQ_2I_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance,/* Filter instance      */
-          (LVM_FLOAT *)pScratch, /* Source               */
-          (LVM_FLOAT *)pScratch, /* Destination          */
-          (LVM_INT16)NumSamples); /* Number of samples    */
+          pScratch, /* Source               */
+          pScratch, /* Destination          */
+          (LVM_INT16)NrFrames);
+#endif
     }
 
     /*
      * Create the mono stream
      */
-    From2iToMono_Float((LVM_FLOAT *)pScratch, /* Stereo source         */
+#ifdef SUPPORT_MC
+    FromMcToMono_Float(pScratch, /* Source */
+        pMono, /* Mono destination */
+        (LVM_INT16)NrFrames,  /* Number of frames */
+        (LVM_INT16)NrChannels);
+#else
+    From2iToMono_Float(pScratch, /* Stereo source         */
         pMono, /* Mono destination      */
-        (LVM_INT16)NumSamples); /* Number of samples     */
+        (LVM_INT16)NrFrames);
+#endif
 
     /*
      * Apply the band pass filter
      */
     BP_1I_D32F32C30_TRC_WRA_02(&pInstance->pCoef->BPFInstance, /* Filter instance       */
-        (LVM_FLOAT *)pMono, /* Source                */
-        (LVM_FLOAT *)pMono, /* Destination           */
-        (LVM_INT16)NumSamples); /* Number of samples     */
+        pMono, /* Source                */
+        pMono, /* Destination           */
+        (LVM_INT16)NrFrames);
 
     /*
      * Apply the AGC and mix
      */
+#ifdef SUPPORT_MC
+    AGC_MIX_VOL_Mc1Mon_D32_WRA(&pInstance->pData->AGCInstance, /* Instance pointer      */
+        pScratch, /* Source         */
+        pMono, /* Mono band pass source */
+        pScratch, /* Destination    */
+        NrFrames, /* Number of frames     */
+        NrChannels); /* Number of channels     */
+#else
     AGC_MIX_VOL_2St1Mon_D32_WRA(&pInstance->pData->AGCInstance, /* Instance pointer      */
         pScratch, /* Stereo source         */
         pMono, /* Mono band pass source */
         pScratch, /* Stereo destination    */
-        NumSamples); /* Number of samples     */
+        NrFrames);
+#endif
 
-    for (ii = 0; ii < 2 * NumSamples; ii++) {
+    for (LVM_INT32 ii = 0; ii < NrSamples; ++ii) {
       //TODO: replace with existing clamping function
-      if(pScratch[ii] < -1.0) {
+      if (pScratch[ii] < -1.0) {
         pScratch[ii] = -1.0;
-      } else if(pScratch[ii] > 1.0) {
+      } else if (pScratch[ii] > 1.0) {
         pScratch[ii] = 1.0;
       }
     }
+  } else {
+    // clear DBE processed path
+    memset(pScratch, 0, sizeof(*pScratch) * NrSamples);
   }
 
   /* Bypass Volume path is processed when DBE is OFF or during On/Off transitions */
@@ -286,21 +326,40 @@
      * The algorithm is disabled but volume management is required to compensate for
      * headroom and volume (if enabled)
      */
-    LVC_MixSoft_1St_D16C31_SAT(&pInstance->pData->BypassVolume,
-        pScratch_in,
+#ifdef SUPPORT_MC
+    LVC_MixSoft_Mc_D16C31_SAT(&pInstance->pData->BypassVolume,
+        pInData,
         pScratchVol,
-        (LVM_INT16)(2 * NumSamples)); /* Left and right */
+        (LVM_INT16)NrFrames,
+        (LVM_INT16)NrChannels);
+#else
+    LVC_MixSoft_1St_D16C31_SAT(&pInstance->pData->BypassVolume,
+        pInData,
+        pScratchVol,
+        (LVM_INT16)NrSamples); /* Left and right, really # samples */
+#endif
+  } else {
+    // clear bypass volume path
+    memset(pScratchVol, 0, sizeof(*pScratchVol) * NrSamples);
   }
 
   /*
    * Mix DBE processed path and bypass volume path
    */
+#ifdef SUPPORT_MC
+  LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->pData->BypassMixer,
+      pScratch,
+      pScratchVol,
+      pOutData,
+      (LVM_INT16)NrFrames,
+      (LVM_INT16)NrChannels);
+#else
   LVC_MixSoft_2St_D16C31_SAT(&pInstance->pData->BypassMixer,
       pScratch,
       pScratchVol,
       pOutData,
-      (LVM_INT16)(2 * NumSamples));
-
-  return(LVDBE_SUCCESS);
+      (LVM_INT16)NrSamples);
+#endif
+  return LVDBE_SUCCESS;
 }
 #endif
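
For orientation, an illustrative view of the scratch partitioning used by LVDBE_Process() above (sizes in LVM_FLOAT elements, NrSamples = NrChannels * NrFrames); this only restates what the code and the TRICKY comment already establish:

    pScratch                              pMono == pScratchVol
    |<----------- NrSamples ------------>|<----------- NrSamples ------------>|
    [ copy of the input, DBE-processed   ][ mono band-pass (NrFrames) for the  ]
    [ in place                           ][ DBE path, then reused as the       ]
                                          [ bypass-volume output (NrSamples)   ]
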
diff --git a/media/libeffects/lvm/lib/Bundle/lib/LVM.h b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
index 9b6da31..83ecae1 100644
--- a/media/libeffects/lvm/lib/Bundle/lib/LVM.h
+++ b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
@@ -296,6 +296,9 @@
     /* Spectrum Analyzer parameters Control */
     LVM_PSA_Mode_en             PSA_Enable;
     LVM_PSA_DecaySpeed_en       PSA_PeakDecayRate;      /* Peak value decay rate*/
+#ifdef SUPPORT_MC
+    LVM_INT32                   NrChannels;
+#endif
 
 } LVM_ControlParams_t;
 
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c
index 0a3c30e..37272e3 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.c
@@ -25,6 +25,8 @@
 #include "LVM_Private.h"
 #include "VectorArithmetic.h"
 
+#include <log/log.h>
+
 /****************************************************************************************/
 /*                                                                                      */
 /* FUNCTION:                 LVM_BufferManagedIn                                        */
@@ -62,8 +64,11 @@
     LVM_Instance_t   *pInstance = (LVM_Instance_t  *)hInstance;
     LVM_Buffer_t     *pBuffer;
     LVM_FLOAT        *pDest;
+#ifdef SUPPORT_MC
+    LVM_INT16        NumChannels = pInstance->NrChannels;
+#else
     LVM_INT16        NumChannels = 2;
-
+#endif
 
     /*
      * Set the processing address pointers
@@ -207,8 +212,7 @@
     LVM_Instance_t   *pInstance = (LVM_Instance_t  *)hInstance;
     LVM_Buffer_t     *pBuffer;
     LVM_INT16        *pDest;
-    LVM_INT16        NumChannels =2;
-
+    LVM_INT16        NumChannels = 2;
 
     /*
      * Set the processing address pointers
@@ -499,7 +503,7 @@
         if (pBuffer->OutDelaySamples != 0)
         {
             Copy_16(&pBuffer->OutDelayBuffer[0],                    /* Source */
-                    pDest,                                          /* Detsination */
+                    pDest,                                          /* Destination */
                     (LVM_INT16)(2*pBuffer->OutDelaySamples));       /* Number of delay samples */
             pDest += 2 * pBuffer->OutDelaySamples;                  /* Update the output pointer */
             pBuffer->SamplesToOutput = (LVM_INT16)(pBuffer->SamplesToOutput - pBuffer->OutDelaySamples); /* Update the numbr of samples to output */
@@ -750,7 +754,11 @@
     LVM_INT16       NumSamples;
     LVM_FLOAT       *pStart;
     LVM_FLOAT       *pDest;
-
+#ifdef SUPPORT_MC
+    LVM_INT32       NrChannels = pInstance->NrChannels;
+#define NrFrames NumSamples  // alias for clarity
+#define FrameCount SampleCount
+#endif
 
     /*
      * Set the pointers
@@ -758,7 +766,6 @@
     NumSamples = pBuffer->SamplesToOutput;
     pStart     = pBuffer->pScratch;
 
-
     /*
      * check if it is the first call of a block
       */
@@ -786,14 +793,25 @@
             /*
              * Copy all output delay samples to the output
              */
+#ifdef SUPPORT_MC
             Copy_Float(&pBuffer->OutDelayBuffer[0],                /* Source */
-                       pDest,                                      /* Detsination */
+                       pDest,                                      /* Destination */
+                       /* Number of delay samples */
+                       (LVM_INT16)(NrChannels * pBuffer->OutDelaySamples));
+#else
+            Copy_Float(&pBuffer->OutDelayBuffer[0],                /* Source */
+                       pDest,                                      /* Destination */
                        (LVM_INT16)(2 * pBuffer->OutDelaySamples)); /* Number of delay samples */
+#endif
 
             /*
              * Update the pointer and sample counts
              */
+#ifdef SUPPORT_MC
+            pDest += NrChannels * pBuffer->OutDelaySamples; /* Output sample pointer */
+#else
             pDest += 2 * pBuffer->OutDelaySamples; /* Output sample pointer */
+#endif
             NumSamples = (LVM_INT16)(NumSamples - pBuffer->OutDelaySamples); /* Samples left \
                                                                                 to send */
             pBuffer->OutDelaySamples = 0; /* No samples left in the buffer */
@@ -803,23 +821,40 @@
             /*
              * Copy only some of the ouput delay samples to the output
              */
+#ifdef SUPPORT_MC
             Copy_Float(&pBuffer->OutDelayBuffer[0],                    /* Source */
-                       pDest,                                          /* Detsination */
+                       pDest,                                          /* Destination */
+                       (LVM_INT16)(NrChannels * NrFrames));       /* Number of delay samples */
+#else
+            Copy_Float(&pBuffer->OutDelayBuffer[0],                    /* Source */
+                       pDest,                                          /* Destination */
                        (LVM_INT16)(2 * NumSamples));       /* Number of delay samples */
+#endif
 
             /*
              * Update the pointer and sample counts
              */
+#ifdef SUPPORT_MC
+            pDest += NrChannels * NrFrames; /* Output sample pointer */
+#else
             pDest += 2 * NumSamples; /* Output sample pointer */
+#endif
             /* No samples left in the buffer */
             pBuffer->OutDelaySamples = (LVM_INT16)(pBuffer->OutDelaySamples - NumSamples);
 
             /*
              * Realign the delay buffer data to avoid using circular buffer management
              */
+#ifdef SUPPORT_MC
+            Copy_Float(&pBuffer->OutDelayBuffer[NrChannels * NrFrames],         /* Source */
+                       &pBuffer->OutDelayBuffer[0],                    /* Destination */
+                       /* Number of samples to move */
+                       (LVM_INT16)(NrChannels * pBuffer->OutDelaySamples));
+#else
             Copy_Float(&pBuffer->OutDelayBuffer[2 * NumSamples],         /* Source */
                        &pBuffer->OutDelayBuffer[0],                    /* Destination */
                        (LVM_INT16)(2 * pBuffer->OutDelaySamples)); /* Number of samples to move */
+#endif
             NumSamples = 0;                                /* Samples left to send */
         }
     }
@@ -836,13 +871,23 @@
             /*
              * Copy all processed samples to the output
              */
+#ifdef SUPPORT_MC
             Copy_Float(pStart,                                      /* Source */
-                       pDest,                                       /* Detsination */
+                       pDest,                                       /* Destination */
+                       (LVM_INT16)(NrChannels * FrameCount)); /* Number of processed samples */
+#else
+            Copy_Float(pStart,                                      /* Source */
+                       pDest,                                       /* Destination */
                        (LVM_INT16)(2 * SampleCount)); /* Number of processed samples */
+#endif
             /*
              * Update the pointer and sample counts
              */
+#ifdef SUPPORT_MC
+            pDest      += NrChannels * FrameCount;                 /* Output sample pointer */
+#else
             pDest      += 2 * SampleCount;                          /* Output sample pointer */
+#endif
             NumSamples  = (LVM_INT16)(NumSamples - SampleCount);    /* Samples left to send */
             SampleCount = 0; /* No samples left in the buffer */
         }
@@ -851,14 +896,25 @@
             /*
              * Copy only some processed samples to the output
              */
+#ifdef SUPPORT_MC
+            Copy_Float(pStart,                                         /* Source */
+                       pDest,                                          /* Destination */
+                       (LVM_INT16)(NrChannels * NrFrames));  /* Number of processed samples */
+#else
             Copy_Float(pStart,                                         /* Source */
                        pDest,                                          /* Destination */
                        (LVM_INT16)(2 * NumSamples));     /* Number of processed samples */
+#endif
             /*
              * Update the pointers and sample counts
                */
+#ifdef SUPPORT_MC
+            pStart      += NrChannels * NrFrames;               /* Processed sample pointer */
+            pDest       += NrChannels * NrFrames;               /* Output sample pointer */
+#else
             pStart      += 2 * NumSamples;                        /* Processed sample pointer */
             pDest       += 2 * NumSamples;                        /* Output sample pointer */
+#endif
             SampleCount  = (LVM_INT16)(SampleCount - NumSamples); /* Processed samples left */
             NumSamples   = 0;                                     /* Clear the sample count */
         }
@@ -870,9 +926,16 @@
      */
     if (SampleCount != 0)
     {
+#ifdef SUPPORT_MC
+        Copy_Float(pStart,                                                 /* Source */
+                   /* Destination */
+                   &pBuffer->OutDelayBuffer[NrChannels * pBuffer->OutDelaySamples],
+                   (LVM_INT16)(NrChannels * FrameCount));      /* Number of processed samples */
+#else
         Copy_Float(pStart,                                                 /* Source */
                    &pBuffer->OutDelayBuffer[2 * pBuffer->OutDelaySamples], /* Destination */
                    (LVM_INT16)(2 * SampleCount));               /* Number of processed samples */
+#endif
         /* Update the buffer count */
         pBuffer->OutDelaySamples = (LVM_INT16)(pBuffer->OutDelaySamples + SampleCount);
     }
@@ -1063,14 +1126,24 @@
 {
 
     LVM_Instance_t      *pInstance  = (LVM_Instance_t  *)hInstance;
-    LVM_INT16           NumChannels =2;
+#ifdef SUPPORT_MC
+    LVM_INT16           NumChannels = pInstance->NrChannels;
+#undef NrFrames
+#define NrFrames (*pNumSamples) // alias for clarity
+#else
+    LVM_INT16           NumChannels = 2;
+#endif
 
 
     /*
      * Update sample counts
      */
     pInstance->pInputSamples    += (LVM_INT16)(*pNumSamples * NumChannels); /* Update the I/O pointers */
+#ifdef SUPPORT_MC
+    pInstance->pOutputSamples   += (LVM_INT16)(NrFrames * NumChannels);
+#else
     pInstance->pOutputSamples   += (LVM_INT16)(*pNumSamples * 2);
+#endif
     pInstance->SamplesToProcess  = (LVM_INT16)(pInstance->SamplesToProcess - *pNumSamples); /* Update the sample count */
 
     /*
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
index cfe53b8..7b85f23 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
@@ -28,6 +28,8 @@
 #include "LVM_Tables.h"
 #include "LVM_Private.h"
 
+#include <log/log.h>
+
 /****************************************************************************************/
 /*                                                                                      */
 /* FUNCTION:           LVM_SetControlParameters                                         */
@@ -75,12 +77,22 @@
         (pParams->SampleRate != LVM_FS_16000) && (pParams->SampleRate != LVM_FS_22050) && (pParams->SampleRate != LVM_FS_24000)       &&
         (pParams->SampleRate != LVM_FS_32000) && (pParams->SampleRate != LVM_FS_44100) && (pParams->SampleRate != LVM_FS_48000))      ||
 #endif
+#ifdef SUPPORT_MC
+        ((pParams->SourceFormat != LVM_STEREO) &&
+         (pParams->SourceFormat != LVM_MONOINSTEREO) &&
+         (pParams->SourceFormat != LVM_MONO) &&
+         (pParams->SourceFormat != LVM_MULTICHANNEL)) ||
+#else
         ((pParams->SourceFormat != LVM_STEREO) && (pParams->SourceFormat != LVM_MONOINSTEREO) && (pParams->SourceFormat != LVM_MONO)) ||
+#endif
         (pParams->SpeakerType > LVM_EX_HEADPHONES))
     {
         return (LVM_OUTOFRANGE);
     }
 
+#ifdef SUPPORT_MC
+    pInstance->Params.NrChannels = pParams->NrChannels;
+#endif
     /*
      * Cinema Sound parameters
      */
@@ -569,6 +581,10 @@
     } while ((pInstance->ControlPending != LVM_FALSE) &&
              (Count > 0));
 
+#ifdef SUPPORT_MC
+    pInstance->NrChannels = LocalParams.NrChannels;
+#endif
+
     /* Clear all internal data if format change*/
     if(LocalParams.SourceFormat != pInstance->Params.SourceFormat)
     {
@@ -719,6 +735,9 @@
         DBE_Params.HeadroomdB       = 0;
         DBE_Params.VolumeControl    = LVDBE_VOLUME_OFF;
         DBE_Params.VolumedB         = 0;
+#ifdef SUPPORT_MC
+        DBE_Params.NrChannels         = LocalParams.NrChannels;
+#endif
 
         /*
          * Make the changes
@@ -775,7 +794,9 @@
         {
             EQNB_Params.SourceFormat = LVEQNB_MONOINSTEREO;     /* Force to Mono-in-Stereo mode */
         }
-
+#ifdef SUPPORT_MC
+        EQNB_Params.NrChannels         = LocalParams.NrChannels;
+#endif
 
         /*
          * Set the control flag
@@ -849,7 +870,9 @@
         CS_Params.SampleRate  = LocalParams.SampleRate;
         CS_Params.ReverbLevel = LocalParams.VirtualizerReverbLevel;
         CS_Params.EffectLevel = LocalParams.CS_EffectLevel;
-
+#ifdef SUPPORT_MC
+        CS_Params.NrChannels  = LocalParams.NrChannels;
+#endif
 
         /*
          * Set the control flag
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
index 26c1c4f..ade329b 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
@@ -584,8 +584,11 @@
     /*
      * DC removal filter
      */
+#ifdef SUPPORT_MC
+    DC_Mc_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
+#else
     DC_2I_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
-
+#endif
 
     /*
      * Treble Enhancement
@@ -1039,7 +1042,11 @@
     LVM_SetHeadroomParams(hInstance, &HeadroomParams);
 
     /* DC removal filter */
+#ifdef SUPPORT_MC
+    DC_Mc_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
+#else
     DC_2I_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
+#endif
 
     return LVM_SUCCESS;
 }
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
index b453222..19d1532 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
@@ -144,12 +144,19 @@
     LVM_FLOAT               *pScratch;          /* Bundle scratch buffer */
 
     LVM_INT16               BufferState;        /* Buffer status */
+#ifdef SUPPORT_MC
+    LVM_FLOAT               InDelayBuffer[3 * LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE];
+#else
     LVM_FLOAT               InDelayBuffer[6 * MIN_INTERNAL_BLOCKSIZE]; /* Input buffer delay line, \
                                                                            left and right */
+#endif
     LVM_INT16               InDelaySamples;     /* Number of samples in the input delay buffer */
-
+#ifdef SUPPORT_MC
+    LVM_FLOAT               OutDelayBuffer[LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE];
+#else
     LVM_FLOAT               OutDelayBuffer[2 * MIN_INTERNAL_BLOCKSIZE]; /* Output buffer delay \
                                                                                       line */
+#endif
     LVM_INT16               OutDelaySamples;    /* Number of samples in the output delay buffer, \
                                                                              left and right */
     LVM_INT16               SamplesToOutput;    /* Samples to write to the output */
@@ -282,6 +289,10 @@
 
     LVM_INT16              NoSmoothVolume;      /* Enable or disable smooth volume changes*/
 
+#ifdef SUPPORT_MC
+    LVM_INT16              NrChannels;
+#endif
+
 } LVM_Instance_t;
 
 
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
index 4a19a13..94ba278 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
@@ -65,6 +65,10 @@
     LVM_FLOAT           *pToProcess = (LVM_FLOAT *)pInData;
     LVM_FLOAT           *pProcessed = pOutData;
     LVM_ReturnStatus_en  Status;
+#ifdef SUPPORT_MC
+    LVM_INT32           NrChannels  = pInstance->NrChannels;
+#define NrFrames SampleCount  // alias for clarity
+#endif
 
     /*
      * Check if the number of samples is zero
@@ -112,6 +116,10 @@
     if (pInstance->ControlPending == LVM_TRUE)
     {
         Status = LVM_ApplyNewSettings(hInstance);
+#ifdef SUPPORT_MC
+        /* Update the local variable NrChannels from pInstance->NrChannels value */
+        NrChannels = pInstance->NrChannels;
+#endif
 
         if(Status != LVM_SUCCESS)
         {
@@ -130,6 +138,9 @@
                        (LVM_INT16)NumSamples);                 /* Number of input samples */
         pInput     = pOutData;
         pToProcess = pOutData;
+#ifdef SUPPORT_MC
+        NrChannels = 2;
+#endif
     }
 
 
@@ -153,7 +164,6 @@
          */
         if (SampleCount != 0)
         {
-
             /*
              * Apply ConcertSound if required
              */
@@ -171,10 +181,18 @@
              */
             if (pInstance->VC_Active!=0)
             {
+#ifdef SUPPORT_MC
+                LVC_MixSoft_Mc_D16C31_SAT(&pInstance->VC_Volume,
+                                       pToProcess,
+                                       pProcessed,
+                                       (LVM_INT16)(NrFrames),
+                                       NrChannels);
+#else
                 LVC_MixSoft_1St_D16C31_SAT(&pInstance->VC_Volume,
                                        pToProcess,
                                        pProcessed,
                                        (LVM_INT16)(2 * SampleCount));     /* Left and right*/
+#endif
                 pToProcess = pProcessed;
             }
 
@@ -221,20 +239,33 @@
                 /*
                  * Apply the filter
                  */
+#ifdef SUPPORT_MC
+                FO_Mc_D16F32C15_LShx_TRC_WRA_01(&pInstance->pTE_State->TrebleBoost_State,
+                                           pProcessed,
+                                           pProcessed,
+                                           (LVM_INT16)NrFrames,
+                                           (LVM_INT16)NrChannels);
+#else
                 FO_2I_D16F32C15_LShx_TRC_WRA_01(&pInstance->pTE_State->TrebleBoost_State,
                                            pProcessed,
                                            pProcessed,
                                            (LVM_INT16)SampleCount);
+#endif
 
             }
-
-            /*
-             * Volume balance
-             */
-            LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
-                                            pProcessed,
-                                            pProcessed,
-                                            SampleCount);
+#ifdef SUPPORT_MC
+            /* TODO - Multichannel support to be added */
+            if (NrChannels == 2)
+#endif
+            {
+                /*
+                 * Volume balance
+                 */
+                LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
+                                              pProcessed,
+                                              pProcessed,
+                                              SampleCount);
+            }
 
             /*
             * Perform Parametric Spectrum Analysis
@@ -242,28 +273,39 @@
             if ((pInstance->Params.PSA_Enable == LVM_PSA_ON) &&
                                             (pInstance->InstParams.PSA_Included == LVM_PSA_ON))
             {
-                    From2iToMono_Float(pProcessed,
-                                       pInstance->pPSAInput,
-                                       (LVM_INT16)(SampleCount));
+#ifdef SUPPORT_MC
+                FromMcToMono_Float(pProcessed,
+                                   pInstance->pPSAInput,
+                                   (LVM_INT16)(NrFrames),
+                                   NrChannels);
+#else
+                From2iToMono_Float(pProcessed,
+                                   pInstance->pPSAInput,
+                                   (LVM_INT16)(SampleCount));
+#endif
 
-                    LVPSA_Process(pInstance->hPSAInstance,
-                            pInstance->pPSAInput,
-                            (LVM_UINT16)(SampleCount),
-                            AudioTime);
+                LVPSA_Process(pInstance->hPSAInstance,
+                        pInstance->pPSAInput,
+                        (LVM_UINT16)(SampleCount),
+                        AudioTime);
             }
 
-
             /*
              * DC removal
              */
+#ifdef SUPPORT_MC
+            DC_Mc_D16_TRC_WRA_01(&pInstance->DC_RemovalInstance,
+                                 pProcessed,
+                                 pProcessed,
+                                 (LVM_INT16)NrFrames,
+                                 NrChannels);
+#else
             DC_2I_D16_TRC_WRA_01(&pInstance->DC_RemovalInstance,
                                  pProcessed,
                                  pProcessed,
                                  (LVM_INT16)SampleCount);
-
-
+#endif
         }
-
         /*
          * Manage the output buffer
          */
@@ -497,4 +539,4 @@
 
     return(LVM_SUCCESS);
 }
-#endif
\ No newline at end of file
+#endif
diff --git a/media/libeffects/lvm/lib/Common/lib/AGC.h b/media/libeffects/lvm/lib/Common/lib/AGC.h
index 9a3d35d..06e742e 100644
--- a/media/libeffects/lvm/lib/Common/lib/AGC.h
+++ b/media/libeffects/lvm/lib/Common/lib/AGC.h
@@ -78,6 +78,15 @@
                                  const LVM_FLOAT            *pMonoSrc,      /* Mono source */
                                  LVM_FLOAT                  *pDst,          /* Stereo destination */
                                  LVM_UINT16                 n);             /* Number of samples */
+#ifdef SUPPORT_MC
+void AGC_MIX_VOL_Mc1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_FLOAT_t  *pInstance,  /* Instance pointer */
+                                 const LVM_FLOAT            *pStSrc,      /* Source */
+                                 const LVM_FLOAT            *pMonoSrc,    /* Mono source */
+                                 LVM_FLOAT                  *pDst,        /* Destination */
+                                 LVM_UINT16                 NrFrames,     /* Number of frames */
+                                 LVM_UINT16                 NrChannels);  /* Number of channels */
+#endif
+
 #else
 void AGC_MIX_VOL_2St1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_D32_t  *pInstance,     /* Instance pointer */
                                  const LVM_INT32            *pStSrc,        /* Stereo source */
diff --git a/media/libeffects/lvm/lib/Common/lib/BIQUAD.h b/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
index 3ee7f63..01539b2 100644
--- a/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
+++ b/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
@@ -30,8 +30,17 @@
 #ifdef BUILD_FLOAT
 typedef struct
 {
+#ifdef SUPPORT_MC
+    /* The memory region created by this structure instance is typecast
+     * into another structure containing a pointer and an array of filter
+     * coefficients. In one case this memory region is used for storing
+     * DC component of channels
+     */
+    LVM_FLOAT *pStorage;
+    LVM_FLOAT Storage[LVM_MAX_CHANNELS];
+#else
     LVM_FLOAT Storage[6];
-
+#endif
 } Biquad_FLOAT_Instance_t;
 #else
 typedef struct
@@ -179,7 +188,12 @@
 
 typedef struct
 {
-    LVM_FLOAT Storage[ (2 * 2) ];  /* Two channels, two taps of size LVM_INT32 */
+#ifdef SUPPORT_MC
+    /* LVM_MAX_CHANNELS channels, two taps of size LVM_FLOAT */
+    LVM_FLOAT Storage[ (LVM_MAX_CHANNELS * 2) ];
+#else
+    LVM_FLOAT Storage[ (2 * 2) ];  /* Two channels, two taps of size LVM_FLOAT */
+#endif
 } Biquad_2I_Order1_FLOAT_Taps_t;
 #else
 typedef struct
@@ -197,12 +211,17 @@
 #ifdef BUILD_FLOAT
 typedef struct
 {
-    LVM_FLOAT Storage[ (1 * 4) ];  /* One channel, four taps of size LVM_INT32 */
+    LVM_FLOAT Storage[ (1 * 4) ];  /* One channel, four taps of size LVM_FLOAT */
 } Biquad_1I_Order2_FLOAT_Taps_t;
 
 typedef struct
 {
-    LVM_FLOAT Storage[ (2 * 4) ];  /* Two channels, four taps of size LVM_INT32 */
+#ifdef SUPPORT_MC
+    /* LVM_MAX_CHANNELS channels, four taps of size LVM_FLOAT */
+    LVM_FLOAT Storage[ (LVM_MAX_CHANNELS * 4) ];
+#else
+    LVM_FLOAT Storage[ (2 * 4) ];  /* Two channels, four taps of size LVM_FLOAT */
+#endif
 } Biquad_2I_Order2_FLOAT_Taps_t;
 #else
 typedef struct
@@ -366,6 +385,13 @@
                                             LVM_FLOAT                    *pDataIn,
                                             LVM_FLOAT                    *pDataOut,
                                             LVM_INT16                 NrSamples);
+#ifdef SUPPORT_MC
+void BQ_MC_D32F32C30_TRC_WRA_01 (           Biquad_FLOAT_Instance_t      *pInstance,
+                                            LVM_FLOAT                    *pDataIn,
+                                            LVM_FLOAT                    *pDataOut,
+                                            LVM_INT16                    NrFrames,
+                                            LVM_INT16                    NrChannels);
+#endif
 #else
 void BQ_2I_D32F32Cll_TRC_WRA_01_Init (      Biquad_Instance_t       *pInstance,
                                             Biquad_2I_Order2_Taps_t *pTaps,
@@ -434,6 +460,13 @@
                                  LVM_FLOAT                     *pDataIn,
                                  LVM_FLOAT                     *pDataOut,
                                  LVM_INT16                     NrSamples);
+#ifdef SUPPORT_MC
+void FO_Mc_D16F32C15_LShx_TRC_WRA_01(Biquad_FLOAT_Instance_t  *pInstance,
+                                     LVM_FLOAT                *pDataIn,
+                                     LVM_FLOAT                *pDataOut,
+                                     LVM_INT16                NrFrames,
+                                     LVM_INT16                NrChannels);
+#endif
 #else
 void FO_1I_D32F32Cll_TRC_WRA_01_Init(       Biquad_Instance_t       *pInstance,
                                             Biquad_1I_Order1_Taps_t *pTaps,
@@ -527,6 +560,13 @@
                                     LVM_FLOAT               *pDataIn,
                                     LVM_FLOAT               *pDataOut,
                                     LVM_INT16               NrSamples);
+#ifdef SUPPORT_MC
+void PK_Mc_D32F32C14G11_TRC_WRA_01(Biquad_FLOAT_Instance_t       *pInstance,
+                                   LVM_FLOAT               *pDataIn,
+                                   LVM_FLOAT               *pDataOut,
+                                   LVM_INT16               NrFrames,
+                                   LVM_INT16               NrChannels);
+#endif
 #else
 void PK_2I_D32F32C14G11_TRC_WRA_01 (        Biquad_Instance_t       *pInstance,
                                             LVM_INT32                    *pDataIn,
@@ -540,12 +580,22 @@
 
 /*** 16 bit data path STEREO ******************************************************/
 #ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void DC_Mc_D16_TRC_WRA_01_Init     (        Biquad_FLOAT_Instance_t       *pInstance);
+
+void DC_Mc_D16_TRC_WRA_01          (        Biquad_FLOAT_Instance_t       *pInstance,
+                                            LVM_FLOAT               *pDataIn,
+                                            LVM_FLOAT               *pDataOut,
+                                            LVM_INT16               NrFrames,
+                                            LVM_INT16               NrChannels);
+#else
 void DC_2I_D16_TRC_WRA_01_Init     (        Biquad_FLOAT_Instance_t       *pInstance);
 
 void DC_2I_D16_TRC_WRA_01          (        Biquad_FLOAT_Instance_t       *pInstance,
                                             LVM_FLOAT               *pDataIn,
                                             LVM_FLOAT               *pDataOut,
                                             LVM_INT16               NrSamples);
+#endif
 #else
 void DC_2I_D16_TRC_WRA_01_Init     (        Biquad_Instance_t       *pInstance);
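The comment added to Biquad_FLOAT_Instance_t above describes a pattern in which the public instance struct only reserves raw storage and a module-private header re-interprets the same memory as its own state. A minimal sketch of that pattern, with hypothetical names standing in for the real *_Private.h definitions changed later in this patch, could look like this:

/* Sketch only: hypothetical names illustrating the storage/typecast pattern
 * described above.  The real private layouts live in the *_Private.h files
 * touched later in this patch (e.g. Filter_FLOAT_State_Mc and its ChDC[]). */
#define MAX_CHANNELS 8                      /* stands in for LVM_MAX_CHANNELS */
typedef float sample_t;                     /* stands in for LVM_FLOAT        */

typedef struct { sample_t Storage[MAX_CHANNELS]; } PublicInstance_t;  /* opaque blob  */
typedef struct { sample_t ChDC[MAX_CHANNELS];    } PrivateDcState_t;  /* private view */

static void DcInit(PublicInstance_t *pInstance)
{
    PrivateDcState_t *pState = (PrivateDcState_t *)pInstance;  /* re-interpret the blob */
    for (int ch = 0; ch < MAX_CHANNELS; ch++) {
        pState->ChDC[ch] = 0.0f;            /* reset each channel's DC estimate */
    }
}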
 
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h b/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h
index 81e288c..a76354d 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Timer.h
@@ -44,7 +44,15 @@
 
 typedef struct
 {
-    LVM_INT32 Storage[6];
+    /*
+     * The memory area created using this structure is internally
+     * typecast to LVM_Timer_Instance_Private_t and used.
+     * The LVM_Timer_Instance_Private_t structure has 3 pointer type elements,
+     * 2 elements of type LVM_INT32 and one element of type LVM_INT16.
+     * In order to cater to both 32 and 64 bit builds, the Storage array should
+     * have a minimum of 9 elements of type LVM_INT32.
+     */
+    LVM_INT32 Storage[9];
 
 } LVM_Timer_Instance_t;
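The resizing above follows from pointer widths: on a 64-bit build the three pointers alone take 24 bytes, so the old Storage[6] of LVM_INT32 (24 bytes) no longer covers the remaining members. A hedged compile-time check of that member-size arithmetic (it mirrors the comment's reasoning only; alignment padding of the real LVM_Timer_Instance_Private_t is not modelled):

/* Sketch of the sizing argument above: the sum of the private members
 * (3 pointers + 2 x int32 + 1 x int16) must fit in Storage[9] of int32. */
#include <assert.h>
#include <stdint.h>

#define TIMER_STORAGE_WORDS 9   /* the new Storage[9] */

static_assert(TIMER_STORAGE_WORDS * sizeof(int32_t) >=
                  3 * sizeof(void *) + 2 * sizeof(int32_t) + sizeof(int16_t),
              "Storage[9] covers 3 pointers + 2 x int32 + 1 x int16");
/* LP64: 36 >= 24 + 8 + 2 = 34;  32-bit: 36 >= 12 + 8 + 2 = 22. */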
 
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index ea16072..303b62d 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -122,6 +122,12 @@
 
 #endif // NATIVE_FLOAT_BUFFER
 
+#ifdef SUPPORT_MC
+#define LVM_MAX_CHANNELS 8 // FCC_8
+#else
+#define LVM_MAX_CHANNELS 2 // FCC_2
+#endif
+
 /****************************************************************************************/
 /*                                                                                      */
 /*  Standard Enumerated types                                                           */
@@ -143,6 +149,9 @@
     LVM_STEREO          = 0,
     LVM_MONOINSTEREO    = 1,
     LVM_MONO            = 2,
+#ifdef SUPPORT_MC
+    LVM_MULTICHANNEL    = 3,
+#endif
     LVM_SOURCE_DUMMY    = LVM_MAXENUM
 } LVM_Format_en;
 
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 0ba20a3..7468a90 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -47,6 +47,16 @@
 void Copy_Float(                 const LVM_FLOAT *src,
                                  LVM_FLOAT *dst,
                                  LVM_INT16 n );
+#ifdef SUPPORT_MC
+void Copy_Float_Mc_Stereo(       const LVM_FLOAT *src,
+                                 LVM_FLOAT *dst,
+                                 LVM_INT16 NrFrames,
+                                 LVM_INT32 NrChannels);
+void Copy_Float_Stereo_Mc(       const LVM_FLOAT *src,
+                                 LVM_FLOAT *dst,
+                                 LVM_INT16 NrFrames,
+                                 LVM_INT32 NrChannels);
+#endif
 #else
 void Copy_16(                 const LVM_INT16 *src,
                                     LVM_INT16 *dst,
@@ -181,6 +191,12 @@
 void From2iToMono_Float(         const LVM_FLOAT  *src,
                                  LVM_FLOAT  *dst,
                                  LVM_INT16 n);
+#ifdef SUPPORT_MC
+void FromMcToMono_Float(const LVM_FLOAT *src,
+                        LVM_FLOAT *dst,
+                        LVM_INT16 NrFrames,
+                        LVM_INT16 NrChannels);
+#endif
 #else
 void From2iToMono_32(         const LVM_INT32  *src,
                                     LVM_INT32  *dst,
diff --git a/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c b/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c
index fa9f01f..5c8655f 100644
--- a/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c
+++ b/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.c
@@ -305,4 +305,150 @@
 
     return;
 }
+#ifdef SUPPORT_MC
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                  AGC_MIX_VOL_Mc1Mon_D32_WRA                                */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*    Apply AGC and mix signals                                                         */
+/*                                                                                      */
+/*                                                                                      */
+/*  McSrc   ------------------|                                                         */
+/*                            |                                                         */
+/*              ______       _|_        ________                                        */
+/*             |      |     |   |      |        |                                       */
+/*  MonoSrc -->| AGC  |---->| + |----->| Volume |------------------------------+--->    */
+/*             | Gain |     |___|      | Gain   |                              |        */
+/*             |______|                |________|                              |        */
+/*                /|\                               __________     ________    |        */
+/*                 |                               |          |   |        |   |        */
+/*                 |-------------------------------| AGC Gain |<--| Peak   |<--|        */
+/*                                                 | Update   |   | Detect |            */
+/*                                                 |__________|   |________|            */
+/*                                                                                      */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  pInstance               Instance pointer                                            */
+/*  pMcSrc                  Multichannel source                                         */
+/*  pMonoSrc                Mono band pass source                                       */
+/*  pDst                    Multichannel destination                                    */
+/*  NrFrames                Number of frames                                            */
+/*  NrChannels              Number of channels                                          */
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/*  Void                                                                                */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*                                                                                      */
+/****************************************************************************************/
+void AGC_MIX_VOL_Mc1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_FLOAT_t  *pInstance,
+                                 const LVM_FLOAT            *pMcSrc,
+                                 const LVM_FLOAT            *pMonoSrc,
+                                 LVM_FLOAT                  *pDst,
+                                 LVM_UINT16                 NrFrames,
+                                 LVM_UINT16                 NrChannels)
+{
+
+    /*
+     * General variables
+     */
+    LVM_UINT16      i, jj;                                      /* Sample index */
+    LVM_FLOAT       SampleVal;                                  /* Sample value */
+    LVM_FLOAT       Mono;                                       /* Mono sample */
+    LVM_FLOAT       AbsPeak;                                    /* Absolute peak signal */
+    LVM_FLOAT       AGC_Mult;                                   /* Short AGC gain */
+    LVM_FLOAT       Vol_Mult;                                   /* Short volume */
+
+
+    /*
+     * Instance control variables
+     */
+    LVM_FLOAT      AGC_Gain      = pInstance->AGC_Gain;         /* Get the current AGC gain */
+    LVM_FLOAT      AGC_MaxGain   = pInstance->AGC_MaxGain;      /* Get maximum AGC gain */
+    LVM_FLOAT      AGC_Attack    = pInstance->AGC_Attack;       /* Attack scaler */
+    /* Decay scaler */
+    LVM_FLOAT      AGC_Decay     = (pInstance->AGC_Decay * (1 << (DECAY_SHIFT)));
+    LVM_FLOAT      AGC_Target    = pInstance->AGC_Target;       /* Get the target level */
+    LVM_FLOAT      Vol_Current   = pInstance->Volume;           /* Actual volume setting */
+    LVM_FLOAT      Vol_Target    = pInstance->Target;           /* Target volume setting */
+    LVM_FLOAT      Vol_TC        = pInstance->VolumeTC;         /* Time constant */
+
+
+    /*
+     * Process on a sample by sample basis
+     */
+    for (i = 0; i < NrFrames; i++)                                  /* For each frame */
+    {
+
+        /*
+         * Get the scalers
+         */
+        AGC_Mult    = (LVM_FLOAT)(AGC_Gain);              /* Get the AGC gain */
+        Vol_Mult    = (LVM_FLOAT)(Vol_Current);           /* Get the volume gain */
+
+        AbsPeak = 0.0f;
+        /*
+         * Get the input samples
+         */
+        for (jj = 0; jj < NrChannels; jj++)
+        {
+            SampleVal  = *pMcSrc++;                       /* Get the sample value of jj Channel*/
+            Mono       = *pMonoSrc;                       /* Get the mono sample */
+
+            /*
+             * Apply the AGC gain to the mono input and mix with the input signal
+             */
+            SampleVal  += (Mono * AGC_Mult);                        /* Mix in the mono signal */
+
+            /*
+             * Apply the volume and write to the output stream
+             */
+            SampleVal  = SampleVal  * Vol_Mult;
+
+            *pDst++ = SampleVal;                                         /* Save the results */
+
+            /*
+             * Update the AGC gain
+             */
+            AbsPeak = Abs_Float(SampleVal) > AbsPeak ? Abs_Float(SampleVal) : AbsPeak;
+        }
+        if (AbsPeak > AGC_Target)
+        {
+            /*
+             * The signal is too large so decrease the gain
+             */
+            AGC_Gain = AGC_Gain * AGC_Attack;
+        }
+        else
+        {
+            /*
+             * The signal is too small so increase the gain
+             */
+            if (AGC_Gain > AGC_MaxGain)
+            {
+                AGC_Gain -= (AGC_Decay);
+            }
+            else
+            {
+                AGC_Gain += (AGC_Decay);
+            }
+        }
+        pMonoSrc++;
+        /*
+         * Update the gain
+         */
+        Vol_Current +=  (Vol_Target - Vol_Current) * ((LVM_FLOAT)Vol_TC / VOL_TC_FLOAT);
+    }
+
+
+    /*
+     * Update the parameters
+     */
+    pInstance->Volume = Vol_Current;                            /* Actual volume setting */
+    pInstance->AGC_Gain = AGC_Gain;
+
+    return;
+}
+#endif /*SUPPORT_MC*/
 #endif /*BUILD_FLOAT*/
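In the multichannel variant above the AGC gain is updated once per frame rather than once per sample pair: the largest absolute output value across all channels of the frame is compared against AGC_Target. A small hedged sketch of that per-frame peak detection in isolation (assumes LVM_Types.h for the basic types; the real code computes the peak incrementally inside its channel loop):

#include "LVM_Types.h"

/* Sketch: largest absolute sample in one interleaved frame -- the quantity
 * compared against AGC_Target to choose between attack and decay.           */
static LVM_FLOAT Frame_AbsPeak(const LVM_FLOAT *pFrame, LVM_UINT16 NrChannels)
{
    LVM_FLOAT Peak = 0.0f;
    for (LVM_UINT16 ch = 0; ch < NrChannels; ch++)
    {
        LVM_FLOAT Abs = (pFrame[ch] < 0.0f) ? -pFrame[ch] : pFrame[ch];
        if (Abs > Peak)
        {
            Peak = Abs;
        }
    }
    return Peak;
}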
diff --git a/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c
index 960de79..d63365c 100644
--- a/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.c
@@ -123,6 +123,87 @@
         }
 
     }
+
+#ifdef SUPPORT_MC
+/**************************************************************************
+ ASSUMPTIONS:
+ COEFS-
+ pBiquadState->coefs[0] is A2, pBiquadState->coefs[1] is A1
+ pBiquadState->coefs[2] is A0, pBiquadState->coefs[3] is -B2
+ pBiquadState->coefs[4] is -B1
+
+ DELAYS-
+ pBiquadState->pDelays[0] to
+ pBiquadState->pDelays[NrChannels - 1] is x(n-1) for all NrChannels
+
+ pBiquadState->pDelays[NrChannels] to
+ pBiquadState->pDelays[2*NrChannels - 1] is x(n-2) for all NrChannels
+
+ pBiquadState->pDelays[2*NrChannels] to
+ pBiquadState->pDelays[3*NrChannels - 1] is y(n-1) for all NrChannels
+
+ pBiquadState->pDelays[3*NrChannels] to
+ pBiquadState->pDelays[4*NrChannels - 1] is y(n-2) for all NrChannels
+***************************************************************************/
+void BQ_MC_D32F32C30_TRC_WRA_01 (           Biquad_FLOAT_Instance_t      *pInstance,
+                                            LVM_FLOAT                    *pDataIn,
+                                            LVM_FLOAT                    *pDataOut,
+                                            LVM_INT16                    NrFrames,
+                                            LVM_INT16                    NrChannels)
+
+
+    {
+        LVM_FLOAT yn, temp;
+        LVM_INT16 ii, jj;
+        PFilter_State_FLOAT pBiquadState = (PFilter_State_FLOAT) pInstance;
+
+         for (ii = NrFrames; ii != 0; ii--)
+         {
+            /**************************************************************************
+                            PROCESSING CHANNEL-WISE
+            ***************************************************************************/
+            for (jj = 0; jj < NrChannels; jj++)
+            {
+                /* yn= (A2  * x(n-2)) */
+                yn = pBiquadState->coefs[0] * pBiquadState->pDelays[NrChannels + jj];
+
+                /* yn+= (A1  * x(n-1)) */
+                temp = pBiquadState->coefs[1] * pBiquadState->pDelays[jj];
+                yn += temp;
+
+                /* yn+= (A0  * x(n)) */
+                temp = pBiquadState->coefs[2] * (*pDataIn);
+                yn += temp;
+
+                 /* yn+= (-B2  * y(n-2)) */
+                temp = pBiquadState->coefs[3] * pBiquadState->pDelays[NrChannels*3 + jj];
+                yn += temp;
+
+                /* yn+= (-B1  * y(n-1)) */
+                temp = pBiquadState->coefs[4] * pBiquadState->pDelays[NrChannels*2 + jj];
+                yn += temp;
+
+                /**************************************************************************
+                                UPDATING THE DELAYS
+                ***************************************************************************/
+                pBiquadState->pDelays[NrChannels * 3 + jj] =
+                    pBiquadState->pDelays[NrChannels * 2 + jj]; /* y(n-2)=y(n-1)*/
+                pBiquadState->pDelays[NrChannels * 1 + jj] =
+                    pBiquadState->pDelays[jj]; /* x(n-2)=x(n-1)*/
+                pBiquadState->pDelays[NrChannels * 2 + jj] = (LVM_FLOAT)yn; /* Update y(n-1)*/
+                pBiquadState->pDelays[jj] = (*pDataIn); /* Update x(n-1)*/
+                pDataIn++;
+                /**************************************************************************
+                                WRITING THE OUTPUT
+                ***************************************************************************/
+                *pDataOut = (LVM_FLOAT)yn; /* Write jj Channel output */
+                pDataOut++;
+            }
+        }
+
+    }
+#endif /*SUPPORT_MC*/
+
 #else
 void BQ_2I_D32F32C30_TRC_WRA_01 (           Biquad_Instance_t       *pInstance,
                                             LVM_INT32                    *pDataIn,
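The ASSUMPTIONS block above describes a channel-blocked delay layout: all x(n-1) values first, then x(n-2), y(n-1) and y(n-2), each block NrChannels wide, for 4 * NrChannels taps in total (matching the enlarged Biquad_2I_Order2_FLOAT_Taps_t). Hypothetical index helpers, purely illustrative and not part of the library, would be:

/* Illustrative only: encodes the delay layout described above; for
 * NrChannels == 2 it reproduces the original stereo taps (2 channels x 4 taps). */
#define BQ_MC_XN1(ch, nch)  (0 * (nch) + (ch))   /* x(n-1) of channel ch */
#define BQ_MC_XN2(ch, nch)  (1 * (nch) + (ch))   /* x(n-2) of channel ch */
#define BQ_MC_YN1(ch, nch)  (2 * (nch) + (ch))   /* y(n-1) of channel ch */
#define BQ_MC_YN2(ch, nch)  (3 * (nch) + (ch))   /* y(n-2) of channel ch */
#define BQ_MC_NUM_TAPS(nch) (4 * (nch))          /* total delay elements  */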
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index e489031..1f9f659 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -84,5 +84,65 @@
 
     return;
 }
+#ifdef SUPPORT_MC
+// Extract out the stereo channel pair from multichannel source.
+void Copy_Float_Mc_Stereo(const LVM_FLOAT *src,
+                 LVM_FLOAT *dst,
+                 LVM_INT16 NrFrames, /* Number of frames */
+                 LVM_INT32 NrChannels)
+{
+    LVM_INT16 ii;
+
+    if (NrChannels >= 2)
+    {
+        for (ii = NrFrames; ii != 0; ii--)
+        {
+            dst[0] = src[0];
+            dst[1] = src[1];
+            dst += 2;
+            src += NrChannels;
+        }
+    }
+    else if (NrChannels == 1)
+    {   // not expected to occur, provided for completeness.
+        src += (NrFrames - 1);
+        dst += 2 * (NrFrames - 1);
+        for (ii = NrFrames; ii != 0; ii--)
+        {
+            dst[0] = src[0];
+            dst[1] = src[0];
+            dst -= 2;
+            src --;
+        }
+    }
+}
+
+// Merge a multichannel source with stereo contained in dst, to dst.
+void Copy_Float_Stereo_Mc(const LVM_FLOAT *src,
+                 LVM_FLOAT *dst,
+                 LVM_INT16 NrFrames, /* Number of frames*/
+                 LVM_INT32 NrChannels)
+{
+    LVM_INT16 ii, jj;
+    LVM_FLOAT *src_st = dst + 2 * (NrFrames - 1);
+
+    // repack dst which carries stereo information
+    // together with the upper channels of src.
+    dst += NrChannels * (NrFrames - 1);
+    src += NrChannels * (NrFrames - 1);
+    for (ii = NrFrames; ii != 0; ii--)
+    {
+        dst[0] = src_st[0];
+        dst[1] = src_st[1];
+        for (jj = 2; jj < NrChannels; jj++)
+        {
+            dst[jj] = src[jj];
+        }
+        dst    -= NrChannels;
+        src    -= NrChannels;
+        src_st -= 2;
+    }
+}
+#endif
 #endif
 /**********************************************************************************/
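The two helpers above are intended to be used as a pair: pull the L/R pair out of an interleaved multichannel block, run a stereo-only stage on it, then merge the processed pair back with the untouched upper channels. A hedged usage sketch (process_stereo() is a hypothetical stereo-only stage; both buffers are assumed to hold NrChannels * NrFrames samples):

#include "LVM_Types.h"
#include "VectorArithmetic.h"

/* Hypothetical stereo-only stage: attenuate the packed L/R pair in place. */
static void process_stereo(LVM_FLOAT *pBuf, LVM_INT16 NrFrames)
{
    for (LVM_INT16 i = 0; i < 2 * NrFrames; i++)
    {
        pBuf[i] *= 0.5f;
    }
}

static void run_stereo_stage_on_mc(const LVM_FLOAT *pMcIn,  /* NrChannels * NrFrames */
                                   LVM_FLOAT       *pMcOut, /* NrChannels * NrFrames */
                                   LVM_INT16        NrFrames,
                                   LVM_INT32        NrChannels)
{
    /* 1. Extract the stereo pair; packed L/R lands at the start of pMcOut.   */
    Copy_Float_Mc_Stereo(pMcIn, pMcOut, NrFrames, NrChannels);

    /* 2. Run the stereo-only stage in place on the packed pair.              */
    process_stereo(pMcOut, NrFrames);

    /* 3. Re-interleave: processed stereo from pMcOut plus the upper channels
     *    of the original multichannel input, written back over pMcOut.       */
    Copy_Float_Stereo_Mc(pMcIn, pMcOut, NrFrames, NrChannels);
}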
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c
index d261c9e..13fac5e 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.c
@@ -33,7 +33,7 @@
         RightDC = pBiquadState->RightDC;
         for(j = NrSamples-1; j >= 0; j--)
         {
-            /* Subtract DC an saturate */
+            /* Subtract DC and saturate */
             Diff =* (pDataIn++) - (LeftDC);
             if (Diff > 1.0f) {
                 Diff = 1.0f; }
@@ -64,6 +64,58 @@
 
 
     }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       DC_Mc_D16_TRC_WRA_01
+ *
+ * DESCRIPTION:
+ *  DC removal from all channels of a multichannel input
+ *
+ * PARAMETERS:
+ *  pInstance      Instance pointer
+ *  pDataIn        Input/Source
+ *  pDataOut       Output/Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void DC_Mc_D16_TRC_WRA_01(Biquad_FLOAT_Instance_t       *pInstance,
+                          LVM_FLOAT               *pDataIn,
+                          LVM_FLOAT               *pDataOut,
+                          LVM_INT16               NrFrames,
+                          LVM_INT16               NrChannels)
+    {
+        LVM_FLOAT *ChDC;
+        LVM_FLOAT Diff;
+        LVM_INT32 j;
+        LVM_INT32 i;
+        PFilter_FLOAT_State_Mc pBiquadState = (PFilter_FLOAT_State_Mc) pInstance;
+
+        ChDC = &pBiquadState->ChDC[0];
+        for (j = NrFrames - 1; j >= 0; j--)
+        {
+            /* Subtract DC and saturate */
+            for (i = NrChannels - 1; i >= 0; i--)
+            {
+                Diff = *(pDataIn++) - (ChDC[i]);
+                if (Diff > 1.0f) {
+                    Diff = 1.0f;
+                } else if (Diff < -1.0f) {
+                    Diff = -1.0f; }
+                *(pDataOut++) = (LVM_FLOAT)Diff;
+                if (Diff < 0) {
+                    ChDC[i] -= DC_FLOAT_STEP;
+                } else {
+                    ChDC[i] += DC_FLOAT_STEP; }
+            }
+
+        }
+
+    }
+#endif
 #else
 void DC_2I_D16_TRC_WRA_01( Biquad_Instance_t       *pInstance,
                            LVM_INT16               *pDataIn,
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c
index 4f4fcd8..0f941a0 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.c
@@ -24,6 +24,17 @@
     pBiquadState->LeftDC        = 0.0f;
     pBiquadState->RightDC       = 0.0f;
 }
+#ifdef SUPPORT_MC
+void  DC_Mc_D16_TRC_WRA_01_Init(Biquad_FLOAT_Instance_t   *pInstance)
+{
+    PFilter_FLOAT_State_Mc pBiquadState  = (PFilter_FLOAT_State_Mc) pInstance;
+    LVM_INT32 i;
+    for (i = 0; i < LVM_MAX_CHANNELS; i++)
+    {
+        pBiquadState->ChDC[i] = 0.0f;
+    }
+}
+#endif
 #else
 void  DC_2I_D16_TRC_WRA_01_Init(Biquad_Instance_t   *pInstance)
 {
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
index fa6b729..db3a6d3 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
@@ -34,6 +34,13 @@
     LVM_FLOAT  RightDC;    /* RightDC  */
 }Filter_FLOAT_State;
 typedef Filter_FLOAT_State * PFilter_FLOAT_State ;
+#ifdef SUPPORT_MC
+typedef struct _Filter_FLOAT_State_Mc_
+{
+    LVM_FLOAT  ChDC[LVM_MAX_CHANNELS];     /* ChannelDC  */
+} Filter_FLOAT_State_Mc;
+typedef Filter_FLOAT_State_Mc * PFilter_FLOAT_State_Mc ;
+#endif
 #else
 typedef struct _Filter_State_
 {
diff --git a/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c
index 192927c..2a50f18 100644
--- a/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.c
@@ -117,6 +117,93 @@
         }
 
     }
+#ifdef SUPPORT_MC
+/**************************************************************************
+ASSUMPTIONS:
+COEFS-
+pBiquadState->coefs[0] is A1,
+pBiquadState->coefs[1] is A0,
+pBiquadState->coefs[2] is -B1,
+DELAYS-
+pBiquadState->pDelays[2*ch + 0] is x(n-1) of the 'ch' - channel
+pBiquadState->pDelays[2*ch + 1] is y(n-1) of the 'ch' - channel
+The index 'ch' runs from 0 to (NrChannels - 1)
+
+PARAMETERS:
+ pInstance        Pointer Instance
+ pDataIn          Input/Source
+ pDataOut         Output/Destination
+ NrFrames         Number of frames
+ NrChannels       Number of channels
+
+RETURNS:
+ void
+***************************************************************************/
+void FO_Mc_D16F32C15_LShx_TRC_WRA_01(Biquad_FLOAT_Instance_t *pInstance,
+                                     LVM_FLOAT               *pDataIn,
+                                     LVM_FLOAT               *pDataOut,
+                                     LVM_INT16               NrFrames,
+                                     LVM_INT16               NrChannels)
+    {
+        LVM_FLOAT   yn;
+        LVM_FLOAT   Temp;
+        LVM_INT16   ii;
+        LVM_INT16   ch;
+        PFilter_Float_State pBiquadState = (PFilter_Float_State) pInstance;
+
+        LVM_FLOAT   *pDelays = pBiquadState->pDelays;
+        LVM_FLOAT   *pCoefs  = &pBiquadState->coefs[0];
+        LVM_FLOAT   A0 = pCoefs[1];
+        LVM_FLOAT   A1 = pCoefs[0];
+        LVM_FLOAT   B1 = pCoefs[2];
+
+
+
+
+        for (ii = NrFrames; ii != 0; ii--)
+        {
+
+            /**************************************************************************
+                            PROCESSING OF THE CHANNELS
+            ***************************************************************************/
+            for (ch = 0; ch < NrChannels; ch++)
+            {
+                // yn =A1  * x(n-1)
+                yn = (LVM_FLOAT)A1 * pDelays[0];
+
+                // yn+=A0  * x(n)
+                yn += (LVM_FLOAT)A0 * (*pDataIn);
+
+                // yn +=  (-B1  * y(n-1))
+                Temp = B1 * pDelays[1];
+                yn += Temp;
+
+
+                /**************************************************************************
+                                UPDATING THE DELAYS
+                ***************************************************************************/
+                pDelays[1] = yn; // Update y(n-1)
+                pDelays[0] = (*pDataIn++); // Update x(n-1)
+
+                /**************************************************************************
+                                WRITING THE OUTPUT
+                ***************************************************************************/
+
+                /*Saturate results*/
+                if (yn > 1.0f)
+                {
+                    yn = 1.0f;
+                } else if (yn < -1.0f) {
+                    yn = -1.0f;
+                }
+
+                *pDataOut++ = (LVM_FLOAT)yn;
+                pDelays += 2;
+            }
+            pDelays -= NrChannels * 2;
+        }
+    }
+#endif
 #else
 void FO_2I_D16F32C15_LShx_TRC_WRA_01(Biquad_Instance_t       *pInstance,
                                      LVM_INT16               *pDataIn,
diff --git a/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c b/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c
index ac1eea8..d02af88 100644
--- a/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c
+++ b/media/libeffects/lvm/lib/Common/src/From2iToMono_32.c
@@ -68,5 +68,47 @@
 
     return;
 }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       FromMcToMono_Float
+ *
+ * DESCRIPTION:
+ *  Creates a mono stream from a multichannel input by taking the average of
+ *  the sample values of all channels
+ *
+ * PARAMETERS:
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void FromMcToMono_Float(const LVM_FLOAT *src,
+                        LVM_FLOAT *dst,
+                        LVM_INT16 NrFrames,
+                        LVM_INT16 NrChannels)
+{
+    LVM_INT16 ii, jj;
+    LVM_FLOAT Temp;
+
+    for (ii = NrFrames; ii != 0; ii--)
+    {
+        Temp = 0.0f;
+        for (jj = NrChannels; jj !=0; jj--)
+        {
+            Temp += (*src);
+            src++;
+        }
+        *dst = Temp / NrChannels;
+        dst++;
+    }
+
+    return;
+}
+#endif
+
 #endif
 /**********************************************************************************/
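Each mono output sample produced above is simply the arithmetic mean of one interleaved frame. A small worked example with hypothetical values:

#include "LVM_Types.h"
#include "VectorArithmetic.h"

static void FromMcToMono_Example(void)
{
    const LVM_FLOAT Src[4] = {0.2f, 0.4f, -0.1f, 0.5f};   /* one frame, 4 channels */
    LVM_FLOAT       Dst[1];

    FromMcToMono_Float(Src, Dst, 1 /* NrFrames */, 4 /* NrChannels */);
    /* Dst[0] == (0.2 + 0.4 - 0.1 + 0.5) / 4 == 0.25 */
}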
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c
index d2694cc..419c7c5 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.c
@@ -26,10 +26,10 @@
    FUNCTION LVCore_MIXSOFT_1ST_D16C31_WRA
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_Core_MixInSoft_D16C31_SAT( LVMixer3_FLOAT_st *ptrInstance,
-                                    const LVM_FLOAT     *src,
-                                          LVM_FLOAT     *dst,
-                                          LVM_INT16     n)
+void LVC_Core_MixInSoft_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
+                                   const LVM_FLOAT   *src,
+                                         LVM_FLOAT   *dst,
+                                         LVM_INT16   n)
 {
 
     LVM_INT16   OutLoop;
@@ -114,6 +114,139 @@
     }
     pInstance->Current = Current;
 }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_Core_MixInSoft_Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input.
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_Core_MixInSoft_Mc_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
+                                      const LVM_FLOAT   *src,
+                                            LVM_FLOAT   *dst,
+                                            LVM_INT16   NrFrames,
+                                            LVM_INT16   NrChannels)
+{
+
+    LVM_INT16   OutLoop;
+    LVM_INT16   InLoop;
+    LVM_INT32   ii, jj;
+    Mix_Private_FLOAT_st  *pInstance = (Mix_Private_FLOAT_st *)(ptrInstance->PrivateParams);
+    LVM_FLOAT   Delta = pInstance->Delta;
+    LVM_FLOAT   Current = pInstance->Current;
+    LVM_FLOAT   Target = pInstance->Target;
+    LVM_FLOAT   Temp;
+
+    /*
+     * The same operation is performed on consecutive frames,
+     * so two frames are processed in one iteration and the
+     * loop runs only NrFrames/2 times.
+     */
+    InLoop = (LVM_INT16)(NrFrames >> 1);
+    /* OutLoop is calculated to handle the case where the NrFrames value is odd. */
+    OutLoop = (LVM_INT16)(NrFrames - (InLoop << 1));
+
+    if (Current < Target) {
+        if (OutLoop) {
+            Temp = Current + Delta;
+            Current = Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (ii = OutLoop*NrChannels; ii != 0; ii--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+            Temp = Current + Delta;
+            Current = Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+            }
+        }
+    }
+    else{
+        if (OutLoop) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (ii = OutLoop*NrChannels; ii != 0; ii--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--) {
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+                Temp = (*dst) + (*(src++) * Current);
+                if (Temp > 1.0f)
+                    *dst++ = 1.0f;
+                else if (Temp < -1.0f)
+                    *dst++ = -1.0f;
+                else
+                    *dst++ = Temp;
+
+            }
+        }
+    }
+    pInstance->Current = Current;
+}
+
+#endif
 #else
 void LVC_Core_MixInSoft_D16C31_SAT( LVMixer3_st *ptrInstance,
                                     const LVM_INT16     *src,
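Both multichannel mixer cores above ramp the gain once per two frames, with OutLoop absorbing a possible odd leading frame; for any NrFrames the split satisfies OutLoop + 2 * InLoop == NrFrames. A tiny sketch of that arithmetic, suitable for a unit test:

#include <assert.h>

/* Sketch: the InLoop/OutLoop split used by the Mc mixer cores above. */
static void CheckFrameSplit(short NrFrames)
{
    short InLoop  = (short)(NrFrames >> 1);            /* pairs of frames       */
    short OutLoop = (short)(NrFrames - (InLoop << 1)); /* 0 or 1 leftover frame */

    assert(OutLoop + 2 * InLoop == NrFrames);          /* e.g. 5 -> 1 + 2 * 2   */
}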
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c
index b5e7f5c..5bfdad8 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.c
@@ -27,10 +27,10 @@
    FUNCTION LVCore_MIXSOFT_1ST_D16C31_WRA
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance,
-                                    const LVM_FLOAT     *src,
-                                          LVM_FLOAT     *dst,
-                                          LVM_INT16     n)
+void LVC_Core_MixSoft_1St_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
+                                     const LVM_FLOAT   *src,
+                                           LVM_FLOAT   *dst,
+                                           LVM_INT16   n)
 {
     LVM_INT16   OutLoop;
     LVM_INT16   InLoop;
@@ -105,6 +105,119 @@
     }
     pInstance->Current=Current;
 }
+
+
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_Core_MixSoft_Mc_D16C31_WRA
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_Core_MixSoft_Mc_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT   *src,
+                                          LVM_FLOAT   *dst,
+                                          LVM_INT16   NrFrames,
+                                          LVM_INT16   NrChannels)
+{
+    LVM_INT16   OutLoop;
+    LVM_INT16   InLoop;
+    LVM_INT32   ii, jj;
+    Mix_Private_FLOAT_st  *pInstance = (Mix_Private_FLOAT_st *)(ptrInstance->PrivateParams);
+    LVM_FLOAT   Delta   = (LVM_FLOAT)pInstance->Delta;
+    LVM_FLOAT   Current = (LVM_FLOAT)pInstance->Current;
+    LVM_FLOAT   Target  = (LVM_FLOAT)pInstance->Target;
+    LVM_FLOAT   Temp;
+
+    /*
+     * The same operation is performed on consecutive frames,
+     * so two frames are processed in one iteration and the
+     * loop runs only NrFrames/2 times.
+     */
+    InLoop = (LVM_INT16)(NrFrames >> 1);
+    /* OutLoop is calculated to handle the case where the NrFrames value is odd. */
+    OutLoop = (LVM_INT16)(NrFrames - (InLoop << 1));
+
+    if (Current<Target) {
+        if (OutLoop) {
+
+            Temp = Current + Delta;
+            if (Temp > 1.0f)
+                Temp = 1.0f;
+            else if (Temp < -1.0f)
+                Temp = -1.0f;
+
+            Current=Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (ii = OutLoop; ii != 0; ii--) {
+                for (jj = NrChannels; jj !=0; jj--) {
+                    *(dst++) = (((LVM_FLOAT)*(src++) * (LVM_FLOAT)Current));
+                }
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+
+            Temp = Current + Delta;
+
+            if (Temp > 1.0f)
+                Temp = 1.0f;
+            else if (Temp < -1.0f)
+                Temp = -1.0f;
+
+            Current=Temp;
+            if (Current > Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--)
+            {
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+            }
+        }
+    }
+    else{
+        if (OutLoop) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (ii = OutLoop; ii != 0; ii--) {
+                for (jj = NrChannels; jj !=0; jj--) {
+                    *(dst++) = (((LVM_FLOAT)*(src++) * (LVM_FLOAT)Current));
+                }
+            }
+        }
+
+        for (ii = InLoop; ii != 0; ii--) {
+            Current -= Delta;
+            if (Current < Target)
+                Current = Target;
+
+            for (jj = NrChannels; jj != 0 ; jj--)
+            {
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+                *(dst++) = (((LVM_FLOAT)*(src++) * Current));
+            }
+        }
+    }
+    pInstance->Current=Current;
+}
+#endif
+
 #else
 void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_st *ptrInstance,
                                     const LVM_INT16     *src,
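
The soft-mix core above advances Current toward Target by Delta once per processed frame pair and clamps the intermediate value to [-1.0, 1.0]. The update rule itself is small; a sketch with illustrative names and plain float:

    /* Sketch: one ramp step of the soft mixer; returns the gain to apply
     * to the next block of samples. The library additionally clamps the
     * intermediate value to [-1.0, 1.0] before comparing with Target. */
    static float ramp_step(float current, float target, float delta)
    {
        if (current < target) {
            current += delta;
            if (current > target)
                current = target;
        } else {
            current -= delta;
            if (current < target)
                current = target;
        }
        return current;
    }
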
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c
index 192f126..65956f7 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.c
@@ -34,10 +34,10 @@
    FUNCTION MIXINSOFT_D16C31_SAT
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_FLOAT_st *ptrInstance,
-                               LVM_FLOAT             *src,
-                               LVM_FLOAT             *dst,
-                               LVM_INT16             n)
+void LVC_MixInSoft_D16C31_SAT(LVMixer3_1St_FLOAT_st *ptrInstance,
+                              const LVM_FLOAT       *src,
+                                    LVM_FLOAT       *dst,
+                                    LVM_INT16       n)
 {
     char        HardMixing = TRUE;
     LVM_FLOAT   TargetGain;
@@ -106,6 +106,110 @@
     }
 
 }
+
+
+
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_MixInSoft_Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_MixInSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *ptrInstance,
+                                 const LVM_FLOAT       *src,
+                                       LVM_FLOAT       *dst,
+                                       LVM_INT16       NrFrames,
+                                       LVM_INT16       NrChannels)
+{
+    char        HardMixing = TRUE;
+    LVM_FLOAT   TargetGain;
+    Mix_Private_FLOAT_st  *pInstance = \
+                             (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+    if (pInstance->Current != pInstance->Target)
+    {
+        if (pInstance->Delta == 1.0f) {
+            pInstance->Current = pInstance->Target;
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else{
+            /* Soft mixing has to be applied */
+            HardMixing = FALSE;
+            LVC_Core_MixInSoft_Mc_D16C31_SAT(&(ptrInstance->MixerStream[0]),
+                                             src,
+                                             dst,
+                                             NrFrames,
+                                             NrChannels);
+        }
+    }
+
+    /******************************************************************************
+       HARD MIXING
+    *******************************************************************************/
+
+    if (HardMixing) {
+        if (pInstance->Target != 0) { /* Nothing to do in case Target = 0 */
+            if ((pInstance->Target) == 1.0f) {
+                Add2_Sat_Float(src, dst, NrFrames*NrChannels);
+            }
+            else{
+                Mac3s_Sat_Float(src,
+                                (pInstance->Target),
+                                dst,
+                                NrFrames * NrChannels);
+                /* In case the LVCore function has changed the Current value */
+                pInstance->Current = pInstance->Target;
+            }
+        }
+    }
+
+
+    /******************************************************************************
+       CALL BACK
+    *******************************************************************************/
+
+    if (ptrInstance->MixerStream[0].CallbackSet) {
+        if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(ptrInstance->MixerStream, TargetGain);
+            ptrInstance->MixerStream[0].CallbackSet = FALSE;
+            if (ptrInstance->MixerStream[0].pCallBack != 0) {
+                (*ptrInstance->MixerStream[0].pCallBack) (\
+                                                ptrInstance->MixerStream[0].pCallbackHandle,
+                                                ptrInstance->MixerStream[0].pGeneralPurpose,
+                                                ptrInstance->MixerStream[0].CallbackParam);
+            }
+        }
+    }
+
+}
+#endif
+
+
 #else
 void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_st *ptrInstance,
                                     LVM_INT16             *src,
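
LVC_MixInSoft_Mc_D16C31_SAT, like its stereo counterpart, falls back to hard mixing whenever the gain is effectively settled; only then is the per-sample ramp skipped. A sketch of that decision, with fabsf standing in for Abs_Float (illustrative, not the library API):

    #include <math.h>

    /* Sketch: choose hard mixing (apply Target directly) over soft mixing
     * (ramp Current toward Target) when the ramp would be inaudible. */
    static int use_hard_mixing(float current, float target, float delta)
    {
        if (current == target)
            return 1;                        /* gain already settled         */
        if (delta == 1.0f)
            return 1;                        /* ramp would finish in one step */
        if (fabsf(current - target) < delta)
            return 1;                        /* difference no longer significant */
        return 0;                            /* otherwise ramp per block     */
    }
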
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c
index 1017de3..0678ae0 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.c
@@ -103,6 +103,101 @@
         }
     }
 }
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_MixSoft_Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src            Source
+ *  dst            Destination
+ *  NrFrames       Number of Frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_MixSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *ptrInstance,
+                                  const LVM_FLOAT      *src,
+                                        LVM_FLOAT      *dst,
+                                        LVM_INT16      NrFrames,
+                                        LVM_INT16      NrChannels)
+{
+    char        HardMixing = TRUE;
+    LVM_FLOAT   TargetGain;
+    Mix_Private_FLOAT_st  *pInstance = \
+                          (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+    if (pInstance->Current != pInstance->Target)
+    {
+        if (pInstance->Delta == 1.0f) {
+            pInstance->Current = pInstance->Target;
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+        }else{
+            /* Soft mixing has to be applied */
+            HardMixing = FALSE;
+            LVC_Core_MixSoft_Mc_D16C31_WRA(&(ptrInstance->MixerStream[0]),
+                                           src,
+                                           dst,
+                                           NrFrames,
+                                           NrChannels);
+        }
+    }
+
+    /******************************************************************************
+       HARD MIXING
+    *******************************************************************************/
+
+    if (HardMixing) {
+        if (pInstance->Target == 0)
+            LoadConst_Float(0.0, dst, NrFrames * NrChannels);
+        else {
+            if ((pInstance->Target) != 1.0f)
+                Mult3s_Float(src, (pInstance->Target), dst, NrFrames * NrChannels);
+            else if (src != dst)
+                Copy_Float(src, dst, NrFrames * NrChannels);
+        }
+
+    }
+
+    /******************************************************************************
+       CALL BACK
+    *******************************************************************************/
+
+    if (ptrInstance->MixerStream[0].CallbackSet) {
+        if (Abs_Float(pInstance->Current - pInstance->Target) < pInstance->Delta) {
+            pInstance->Current = pInstance->Target; /* Difference is not significant anymore. \
+                                                       Make them equal. */
+            TargetGain = pInstance->Target;
+            LVC_Mixer_SetTarget(ptrInstance->MixerStream, TargetGain);
+            ptrInstance->MixerStream[0].CallbackSet = FALSE;
+            if (ptrInstance->MixerStream[0].pCallBack != 0) {
+                (*ptrInstance->MixerStream[0].pCallBack) (\
+                                                ptrInstance->MixerStream[0].pCallbackHandle,
+                                                ptrInstance->MixerStream[0].pGeneralPurpose,
+                                                ptrInstance->MixerStream[0].CallbackParam);
+            }
+        }
+    }
+}
+
+#endif
+
 #else
 void LVC_MixSoft_1St_D16C31_SAT( LVMixer3_1St_st *ptrInstance,
                                   const LVM_INT16             *src,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c
index 3c90071..8a89de1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.c
@@ -26,11 +26,11 @@
    FUNCTION LVC_MixSoft_2St_D16C31_SAT.c
 ***********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_FLOAT_st *ptrInstance,
-                                 const   LVM_FLOAT       *src1,
-                                 LVM_FLOAT       *src2,
-                                 LVM_FLOAT       *dst,
-                                 LVM_INT16       n)
+void LVC_MixSoft_2St_D16C31_SAT(LVMixer3_2St_FLOAT_st *ptrInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                      LVM_FLOAT       *dst,
+                                      LVM_INT16       n)
 {
     Mix_Private_FLOAT_st  *pInstance1 = \
                              (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
@@ -67,6 +67,70 @@
                                          src1, src2, dst, n);
     }
 }
+
+#ifdef SUPPORT_MC
+/*
+ * FUNCTION:       LVC_MixSoft_2Mc_D16C31_SAT
+ *
+ * DESCRIPTION:
+ *  2 stream Mixer function with support for processing multichannel input
+ *
+ * PARAMETERS:
+ *  ptrInstance    Instance pointer
+ *  src1           First multichannel source
+ *  src2           Second multichannel source
+ *  dst            Destination
+ *  NrFrames       Number of frames
+ *  NrChannels     Number of channels
+ *
+ * RETURNS:
+ *  void
+ *
+ */
+void LVC_MixSoft_2Mc_D16C31_SAT(LVMixer3_2St_FLOAT_st *ptrInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                      LVM_FLOAT       *dst,
+                                      LVM_INT16       NrFrames,
+                                      LVM_INT16       NrChannels)
+{
+    Mix_Private_FLOAT_st  *pInstance1 = \
+                             (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+    Mix_Private_FLOAT_st  *pInstance2 = \
+                             (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[1].PrivateParams);
+
+    if (NrFrames <= 0)    return;
+
+    /******************************************************************************
+       SOFT MIXING
+    *******************************************************************************/
+    if ((pInstance1->Current == pInstance1->Target) && (pInstance1->Current == 0)) {
+        LVC_MixSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[1]),
+                                    src2, dst, NrFrames, NrChannels);
+    }
+    else if ((pInstance2->Current == pInstance2->Target) && (pInstance2->Current == 0)) {
+        LVC_MixSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[0]),
+                                    src1, dst, NrFrames, NrChannels);
+    }
+    else if ((pInstance1->Current != pInstance1->Target) || \
+                                    (pInstance2->Current != pInstance2->Target))
+    {
+        LVC_MixSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[0]),
+                                   src1, dst, NrFrames, NrChannels);
+        LVC_MixInSoft_Mc_D16C31_SAT((LVMixer3_1St_FLOAT_st *)(&ptrInstance->MixerStream[1]),
+                                   src2, dst, NrFrames, NrChannels);
+    }
+    else{
+        /******************************************************************************
+           HARD MIXING
+        *******************************************************************************/
+        LVC_Core_MixHard_2St_D16C31_SAT(&ptrInstance->MixerStream[0],
+                                        &ptrInstance->MixerStream[1],
+                                        src1, src2, dst, NrFrames * NrChannels);
+    }
+}
+#endif
+
 #else
 void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_st *ptrInstance,
                                     const   LVM_INT16       *src1,
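
When neither stream is still ramping and neither gain is zero, the two-stream path above hands off to the hard mixer over NrFrames * NrChannels samples. Assuming the core hard mixer computes dst = sat(g1 * src1 + g2 * src2) over all interleaved samples, the arithmetic reduces to this sketch (plain float, illustrative names):

    #include <stddef.h>

    /* Sketch: two-stream hard mix with saturation over interleaved samples. */
    static void mix_two_streams(const float *src1, const float *src2,
                                float *dst, float g1, float g2, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            float acc = src1[i] * g1 + src2[i] * g2;
            dst[i] = acc > 1.0f ? 1.0f : (acc < -1.0f ? -1.0f : acc);
        }
    }
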
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
index f904915..7f18747 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
@@ -157,10 +157,18 @@
 /*** 16 bit functions *************************************************************/
 
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_1St_D16C31_SAT( LVMixer3_1St_FLOAT_st *pInstance,
-                                 const LVM_FLOAT           *src,
-                                 LVM_FLOAT           *dst,
-                                 LVM_INT16           n);
+void LVC_MixSoft_1St_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                                const LVM_FLOAT       *src,
+                                      LVM_FLOAT       *dst,
+                                      LVM_INT16       n);
+#ifdef SUPPORT_MC
+void LVC_MixSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                               const LVM_FLOAT       *src,
+                                     LVM_FLOAT       *dst,
+                                     LVM_INT16       NrFrames,
+                                     LVM_INT16       NrChannels);
+#endif
+
 #else
 void LVC_MixSoft_1St_D16C31_SAT( LVMixer3_1St_st *pInstance,
                                   const LVM_INT16           *src,
@@ -169,10 +177,18 @@
 #endif
 
 #ifdef BUILD_FLOAT
-void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_FLOAT_st *pInstance,
-                               LVM_FLOAT           *src,
-                               LVM_FLOAT           *dst,
-                               LVM_INT16           n);
+void LVC_MixInSoft_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                              const LVM_FLOAT       *src,
+                                    LVM_FLOAT       *dst,
+                                    LVM_INT16       n);
+#ifdef SUPPORT_MC
+void LVC_MixInSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
+                                 const LVM_FLOAT       *src,
+                                       LVM_FLOAT       *dst,
+                                       LVM_INT16       NrFrames,
+                                       LVM_INT16       NrChannels);
+#endif
+
 #else
 void LVC_MixInSoft_D16C31_SAT( LVMixer3_1St_st *pInstance,
                                         LVM_INT16           *src,
@@ -181,11 +197,19 @@
 #endif
 
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_FLOAT_st *pInstance,
-                                 const LVM_FLOAT             *src1,
-                                 LVM_FLOAT             *src2,
-                                 LVM_FLOAT             *dst,  /* dst cannot be equal to src2 */
-                                 LVM_INT16             n);
+void LVC_MixSoft_2St_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                LVM_FLOAT             *dst,  /* dst cannot be equal to src2 */
+                                LVM_INT16             n);
+#ifdef SUPPORT_MC
+void LVC_MixSoft_2Mc_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                const LVM_FLOAT       *src1,
+                                const LVM_FLOAT       *src2,
+                                LVM_FLOAT             *dst,  /* dst cannot be equal to src2 */
+                                LVM_INT16             NrFrames,
+                                LVM_INT16             NrChannels);
+#endif
 #else
 void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_st *pInstance,
                                 const LVM_INT16             *src1,
@@ -200,10 +224,10 @@
 /* Gain values should not be more that 1.0                                        */
 /**********************************************************************************/
 #ifdef BUILD_FLOAT
-void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_FLOAT_st         *pInstance,
-                                    const   LVM_FLOAT           *src,
-                                    LVM_FLOAT           *dst,   /* dst can be equal to src */
-                                    LVM_INT16           n);     /* Number of stereo samples */
+void LVC_MixSoft_1St_2i_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+                                   const   LVM_FLOAT     *src,
+                                   LVM_FLOAT             *dst,   /* dst can be equal to src */
+                                   LVM_INT16             n);     /* Number of stereo samples */
 #else
 void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_st         *pInstance,
                                 const   LVM_INT16           *src,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c
index c67455a..507eefa 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_GetTarget.c
@@ -35,7 +35,7 @@
 {
     LVM_FLOAT       TargetGain;
     Mix_Private_FLOAT_st  *pInstance = (Mix_Private_FLOAT_st *)pStream->PrivateParams;
-    
+
     TargetGain = pInstance->Target;  // TargetGain
     return TargetGain;
 }
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
index d0d0e1f..f10094b 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
@@ -62,6 +62,13 @@
                                     const LVM_FLOAT     *src,
                                     LVM_FLOAT     *dst,
                                     LVM_INT16     n);
+#ifdef SUPPORT_MC
+void LVC_Core_MixInSoft_Mc_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT     *src,
+                                          LVM_FLOAT     *dst,
+                                          LVM_INT16     NrFrames,
+                                          LVM_INT16     NrChannels);
+#endif
 #else
 void LVC_Core_MixInSoft_D16C31_SAT( LVMixer3_st *pInstance,
                                     const LVM_INT16     *src,
@@ -73,6 +80,13 @@
                                       const LVM_FLOAT     *src,
                                       LVM_FLOAT     *dst,
                                       LVM_INT16     n);
+#ifdef SUPPORT_MC
+void LVC_Core_MixSoft_Mc_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
+                                    const LVM_FLOAT     *src,
+                                          LVM_FLOAT     *dst,
+                                          LVM_INT16     NrFrames,
+                                          LVM_INT16     NrChannels);
+#endif
 #else
 void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_st *pInstance,
                                     const LVM_INT16     *src,
diff --git a/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c b/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c
index 9c17a05..6c8b2db 100644
--- a/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c
+++ b/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.c
@@ -119,6 +119,80 @@
         }
 
     }
+
+#ifdef SUPPORT_MC
+/**************************************************************************
+DELAYS-
+pBiquadState->pDelays[0] to
+pBiquadState->pDelays[NrChannels - 1] is x(n-1) for all NrChannels
+
+pBiquadState->pDelays[NrChannels] to
+pBiquadState->pDelays[2*NrChannels - 1] is x(n-2) for all NrChannels
+
+pBiquadState->pDelays[2*NrChannels] to
+pBiquadState->pDelays[3*NrChannels - 1] is y(n-1) for all NrChannels
+
+pBiquadState->pDelays[3*NrChannels] to
+pBiquadState->pDelays[4*NrChannels - 1] is y(n-2) for all NrChannels
+***************************************************************************/
+
+void PK_Mc_D32F32C14G11_TRC_WRA_01 (Biquad_FLOAT_Instance_t       *pInstance,
+                                    LVM_FLOAT               *pDataIn,
+                                    LVM_FLOAT               *pDataOut,
+                                    LVM_INT16               NrFrames,
+                                    LVM_INT16               NrChannels)
+    {
+        LVM_FLOAT yn, ynO, temp;
+        LVM_INT16 ii, jj;
+        PFilter_State_Float pBiquadState = (PFilter_State_Float) pInstance;
+
+         for (ii = NrFrames; ii != 0; ii--)
+         {
+
+            for (jj = 0; jj < NrChannels; jj++)
+            {
+                /**************************************************************************
+                                PROCESSING OF THE jj CHANNEL
+                ***************************************************************************/
+                /* yn= (A0  * (x(n) - x(n-2)))*/
+                temp = (*pDataIn) - pBiquadState->pDelays[NrChannels + jj];
+                yn = temp * pBiquadState->coefs[0];
+
+                /* yn+= ((-B2  * y(n-2))) */
+                temp = pBiquadState->pDelays[NrChannels*3 + jj] * pBiquadState->coefs[1];
+                yn += temp;
+
+                /* yn+= ((-B1 * y(n-1))) */
+                temp = pBiquadState->pDelays[NrChannels*2 + jj] * pBiquadState->coefs[2];
+                yn += temp;
+
+                /* ynO= ((Gain * yn)) */
+                ynO = yn * pBiquadState->coefs[3];
+
+                /* ynO=(ynO + x(n))*/
+                ynO += (*pDataIn);
+
+                /**************************************************************************
+                                UPDATING THE DELAYS
+                ***************************************************************************/
+                pBiquadState->pDelays[NrChannels * 3 + jj] =
+                    pBiquadState->pDelays[NrChannels * 2 + jj]; /* y(n-2)=y(n-1)*/
+                pBiquadState->pDelays[NrChannels * 1 + jj] =
+                    pBiquadState->pDelays[jj]; /* x(n-2)=x(n-1)*/
+                pBiquadState->pDelays[NrChannels * 2 + jj] = yn; /* Update y(n-1) */
+                pBiquadState->pDelays[jj] = (*pDataIn); /* Update x(n-1)*/
+                pDataIn++;
+
+                /**************************************************************************
+                                WRITING THE OUTPUT
+                ***************************************************************************/
+                *pDataOut = ynO; /* Write output*/
+                pDataOut++;
+            }
+        }
+
+    }
+#endif
 #else
 void PK_2I_D32F32C14G11_TRC_WRA_01 ( Biquad_Instance_t       *pInstance,
                                      LVM_INT32               *pDataIn,
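
The DELAYS comment above describes four channel-sized banks in pDelays: x(n-1), x(n-2), y(n-1) and y(n-2). The indexing used throughout the multichannel biquad can be captured with one helper; a sketch:

    /* Sketch: index into pDelays for the multichannel peaking biquad.
     * bank: 0 = x(n-1), 1 = x(n-2), 2 = y(n-1), 3 = y(n-2);
     * ch is the channel index, nch is NrChannels. */
    static inline int delay_index(int bank, int ch, int nch)
    {
        return bank * nch + ch;
    }
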
diff --git a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
index 8e0b738..e7fdbf6 100644
--- a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
+++ b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
@@ -267,7 +267,9 @@
     /* Equaliser parameters */
     LVM_UINT16                  NBands;                 /* Number of bands */
     LVEQNB_BandDef_t            *pBandDefinition;       /* Pointer to equaliser definitions */
-
+#ifdef SUPPORT_MC
+    LVM_INT16                   NrChannels;
+#endif
 } LVEQNB_Params_t;
 
 
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
index 56b02e0..a9cd5fd 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
@@ -46,7 +46,12 @@
 #define LVEQNB_INSTANCE_ALIGN       4                   /* 32-bit alignment for instance structures */
 #define LVEQNB_DATA_ALIGN           4                   /* 32-bit alignment for structures */
 #define LVEQNB_COEF_ALIGN           4                   /* 32-bit alignment for long words */
+#ifdef SUPPORT_MC
+/* Number of buffers required for inplace processing */
+#define LVEQNB_SCRATCHBUFFERS       (LVM_MAX_CHANNELS * 2)
+#else
 #define LVEQNB_SCRATCHBUFFERS       4                   /* Number of buffers required for inplace processing */
+#endif
 #define LVEQNB_SCRATCH_ALIGN        4                   /* 32-bit alignment for long data */
 
 #define LVEQNB_BYPASS_MIXER_TC      100                 /* Bypass Mixer TC */
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c
index 6328181..d188c0e 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.c
@@ -26,6 +26,7 @@
 #include "VectorArithmetic.h"
 #include "BIQUAD.h"
 
+#include <log/log.h>
 
 /****************************************************************************************/
 /*                                                                                      */
@@ -61,14 +62,18 @@
 LVEQNB_ReturnStatus_en LVEQNB_Process(LVEQNB_Handle_t       hInstance,
                                       const LVM_FLOAT       *pInData,
                                       LVM_FLOAT             *pOutData,
-                                      LVM_UINT16            NumSamples)
-{
-
-    LVM_UINT16          i;
-    Biquad_FLOAT_Instance_t   *pBiquad;
+                                      const LVM_UINT16      NrFrames)
+{                                     // updated to use samples = frames * channels.
     LVEQNB_Instance_t   *pInstance = (LVEQNB_Instance_t  *)hInstance;
-    LVM_FLOAT           *pScratch;
 
+#ifdef SUPPORT_MC
+    // Mono passed in as stereo
+    const LVM_INT32 NrChannels = pInstance->Params.NrChannels == 1
+        ? 2 : pInstance->Params.NrChannels;
+#else
+    const LVM_INT32 NrChannels = 2; // FCC_2
+#endif
+    const LVM_INT32 NrSamples = NrChannels * NrFrames;
 
      /* Check for NULL pointers */
     if((hInstance == LVM_NULL) || (pInData == LVM_NULL) || (pOutData == LVM_NULL))
@@ -82,14 +87,14 @@
         return LVEQNB_ALIGNMENTERROR;
     }
 
-    pScratch  = (LVM_FLOAT *)pInstance->pFastTemporary;
+    LVM_FLOAT * const pScratch = (LVM_FLOAT *)pInstance->pFastTemporary;
 
     /*
-    * Check the number of samples is not too large
+    * Check the number of frames is not too large
     */
-    if (NumSamples > pInstance->Capabilities.MaxBlockSize)
+    if (NrFrames > pInstance->Capabilities.MaxBlockSize)
     {
-        return(LVEQNB_TOOMANYSAMPLES);
+        return LVEQNB_TOOMANYSAMPLES;
     }
 
     if (pInstance->Params.OperatingMode == LVEQNB_ON)
@@ -97,16 +102,16 @@
         /*
          * Copy input data in to scratch buffer
          */
+        Copy_Float(pInData,     /* Source */
+                   pScratch,    /* Destination */
+                   (LVM_INT16)NrSamples);
 
-        Copy_Float((LVM_FLOAT *)pInData,      /* Source */
-                   pScratch,                  /* Destination */
-                   (LVM_INT16)(2 * NumSamples)); /* Left and Right */
         /*
          * For each section execute the filter unless the gain is 0dB
          */
         if (pInstance->NBands != 0)
         {
-            for (i = 0; i < pInstance->NBands; i++)
+            for (LVM_UINT16 i = 0; i < pInstance->NBands; i++)
             {
                 /*
                  * Check if band is non-zero dB gain
@@ -116,7 +121,7 @@
                     /*
                      * Get the address of the biquad instance
                      */
-                    pBiquad = &pInstance->pEQNB_FilterState_Float[i];
+                    Biquad_FLOAT_Instance_t *pBiquad = &pInstance->pEQNB_FilterState_Float[i];
 
 
                     /*
@@ -126,10 +131,18 @@
                     {
                         case LVEQNB_SinglePrecision_Float:
                         {
+#ifdef SUPPORT_MC
+                            PK_Mc_D32F32C14G11_TRC_WRA_01(pBiquad,
+                                                          pScratch,
+                                                          pScratch,
+                                                          (LVM_INT16)NrFrames,
+                                                          (LVM_INT16)NrChannels);
+#else
                             PK_2I_D32F32C14G11_TRC_WRA_01(pBiquad,
-                                                          (LVM_FLOAT *)pScratch,
-                                                          (LVM_FLOAT *)pScratch,
-                                                          (LVM_INT16)NumSamples);
+                                                          pScratch,
+                                                          pScratch,
+                                                          (LVM_INT16)NrFrames);
+#endif
                             break;
                         }
                         default:
@@ -141,19 +154,29 @@
 
 
         if(pInstance->bInOperatingModeTransition == LVM_TRUE){
+#ifdef SUPPORT_MC
+            LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->BypassMixer,
+                                       pScratch,
+                                       pInData,
+                                       pScratch,
+                                       (LVM_INT16)NrFrames,
+                                       (LVM_INT16)NrChannels);
+#else
             LVC_MixSoft_2St_D16C31_SAT(&pInstance->BypassMixer,
-                                       (LVM_FLOAT *)pScratch,
-                                       (LVM_FLOAT *)pInData,
-                                       (LVM_FLOAT *)pScratch,
-                                       (LVM_INT16)(2 * NumSamples));
-            Copy_Float((LVM_FLOAT*)pScratch,                           /* Source */
-                       pOutData,                                       /* Destination */
-                       (LVM_INT16)(2 * NumSamples));                     /* Left and Right samples */
+                                       pScratch,
+                                       pInData,
+                                       pScratch,
+                                       (LVM_INT16)NrSamples);
+#endif
+            // same copy as in the else clause below
+            Copy_Float(pScratch,                         /* Source */
+                       pOutData,                         /* Destination */
+                       (LVM_INT16)NrSamples);            /* All channel samples */
         }
         else{
             Copy_Float(pScratch,              /* Source */
                        pOutData,              /* Destination */
-                       (LVM_INT16 )(2 * NumSamples)); /* Left and Right */
+                       (LVM_INT16)NrSamples); /* All channel samples */
         }
     }
     else
@@ -163,12 +186,12 @@
          */
         if (pInData != pOutData)
         {
-            Copy_Float(pInData,                                    /* Source */
-                       pOutData,                                   /* Destination */
-                       (LVM_INT16)(2 * NumSamples));                 /* Left and Right samples */
+            Copy_Float(pInData,                          /* Source */
+                       pOutData,                         /* Destination */
+                       (LVM_INT16)NrSamples);            /* All channel samples */
         }
     }
-    return(LVEQNB_SUCCESS);
+    return LVEQNB_SUCCESS;
 
 }
 #else
@@ -312,4 +335,4 @@
     return(LVEQNB_SUCCESS);
 
 }
-#endif
\ No newline at end of file
+#endif
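
LVEQNB_Process now takes a frame count, and every copy and block-size check is expressed in samples derived from it. The accounting, including the mono-presented-as-stereo case, fits in a few lines; a sketch with illustrative names:

    /* Sketch: per-call sample accounting for the frame-based API.
     * Returns the number of interleaved samples to process, or -1 when
     * the frame count exceeds the configured maximum block size. */
    static int nr_samples_for_call(int nr_channels, int nr_frames,
                                   int max_block_frames)
    {
        int ch = (nr_channels == 1) ? 2 : nr_channels;  /* mono passed as stereo */
        if (nr_frames > max_block_frames)
            return -1;
        return ch * nr_frames;
    }
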
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c b/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c
index 566d84f..1d1283e 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_Process.c
@@ -579,7 +579,7 @@
                                pTemp,
                                pTemp,
                                (LVM_INT16)NumSamples);
-    
+
     /*
      *  Process all delay lines
      */
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
index a719053..8c7807f 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.c
@@ -64,12 +64,21 @@
         (
         (pNewParams->SampleRate != LVM_FS_8000) && (pNewParams->SampleRate != LVM_FS_11025) && (pNewParams->SampleRate != LVM_FS_12000)       &&
         (pNewParams->SampleRate != LVM_FS_16000) && (pNewParams->SampleRate != LVM_FS_22050) && (pNewParams->SampleRate != LVM_FS_24000)       &&
-        (pNewParams->SampleRate != LVM_FS_32000) && (pNewParams->SampleRate != LVM_FS_44100) && (pNewParams->SampleRate != LVM_FS_48000)      
+        (pNewParams->SampleRate != LVM_FS_32000) &&
+        (pNewParams->SampleRate != LVM_FS_44100) &&
+        (pNewParams->SampleRate != LVM_FS_48000)
 #ifdef HIGHER_FS
         && (pNewParams->SampleRate != LVM_FS_96000) && (pNewParams->SampleRate != LVM_FS_192000)
 #endif
         )
+#ifdef SUPPORT_MC
+        || ((pNewParams->SourceFormat != LVM_STEREO)       &&
+            (pNewParams->SourceFormat != LVM_MONOINSTEREO) &&
+            (pNewParams->SourceFormat != LVM_MONO)         &&
+            (pNewParams->SourceFormat != LVM_MULTICHANNEL)))
+#else
         || ((pNewParams->SourceFormat != LVM_STEREO) && (pNewParams->SourceFormat != LVM_MONOINSTEREO) && (pNewParams->SourceFormat != LVM_MONO)) )
+#endif
     {
         return (LVREV_OUTOFRANGE);
     }
diff --git a/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h b/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
index e75695e..e507a7c 100644
--- a/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
+++ b/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
@@ -205,6 +205,9 @@
     LVM_Fs_en               SampleRate;             /* Sampling rate */
     LVM_INT16               EffectLevel;            /* Effect level */
     LVM_UINT16              ReverbLevel;            /* Reverb level in % */
+#ifdef SUPPORT_MC
+    LVM_INT32               NrChannels;
+#endif
 } LVCS_Params_t;
 
 
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
index a97e4f0..ab8ccd1 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
@@ -61,6 +61,15 @@
 
 /* Memory */
 #define LVCS_SCRATCHBUFFERS              6      /* Number of buffers required for inplace processing */
+#ifdef SUPPORT_MC
+/*
+ * The Concert Surround module processes only the first two channels of a
+ * multichannel input. Those two channels are copied from the multichannel
+ * input into a scratch buffer; the buffers added here are used for that
+ * purpose.
+ */
+#define LVCS_MC_SCRATCHBUFFERS           2
+#endif
 
 /* General */
 #define LVCS_INVALID                0xFFFF      /* Invalid init parameter */
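
The two extra buffers reserved here match the partitioning used in LVCS_Process.c: the L/R pair extracted from the multichannel input lives at offset 2 * NrFrames into the scratch region, and a second stereo copy for the stereo enhancer starts at LVCS_SCRATCHBUFFERS * NrFrames. A sketch of that layout, assuming one buffer corresponds to NrFrames floats:

    /* Sketch: carving the temporary-fast scratch region for multichannel
     * input (offsets in floats; 6 == LVCS_SCRATCHBUFFERS). */
    typedef struct {
        float *scratch;  /* base of the scratch region              */
        float *input;    /* stereo (L, R) extracted from the input  */
        float *st_in;    /* stereo copy fed to the stereo enhancer  */
    } cs_scratch_layout;

    static cs_scratch_layout carve_scratch(float *base, int nr_frames)
    {
        cs_scratch_layout s;
        s.scratch = base;
        s.input   = base + 2 * nr_frames;
        s.st_in   = base + 6 * nr_frames;
        return s;
    }
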
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
index 3956d4d..ef1d9eb 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
@@ -76,6 +76,22 @@
     LVCS_Instance_t     *pInstance = (LVCS_Instance_t  *)hInstance;
     LVM_FLOAT           *pScratch;
     LVCS_ReturnStatus_en err;
+#ifdef SUPPORT_MC
+    LVM_FLOAT           *pStIn;
+    LVM_INT32           channels = pInstance->Params.NrChannels;
+#define NrFrames NumSamples  // alias for clarity
+
+    /* For mono processing, a stereo signal is created from the mono input
+     * and stored in pInData before any of the effects are applied.
+     * pInstance->Params.NrChannels is not updated at that point, so
+     * channels is set to 2 here to treat pInData as stereo.
+     */
+    if (channels == 1)
+    {
+        channels = 2;
+    }
+#endif
 
     pScratch  = (LVM_FLOAT *) \
                   pInstance->MemoryTable.Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress;
@@ -83,6 +99,25 @@
     /*
      * Check if the processing is inplace
      */
+#ifdef SUPPORT_MC
+    /*
+     * The pInput buffer holds the data of the first two channels (Left, Right),
+     * so it requires 2 * NrFrames samples of memory.
+     * The Concert Surround module processes only the L and R channels.
+     */
+    pInput = pScratch + (2 * NrFrames);
+    pStIn  = pScratch + (LVCS_SCRATCHBUFFERS * NrFrames);
+    /* The first two channels of data are extracted from the input and
+     * copied into the pInput buffer.
+     */
+    Copy_Float_Mc_Stereo((LVM_FLOAT *)pInData,
+                         (LVM_FLOAT *)pInput,
+                         NrFrames,
+                         channels);
+    Copy_Float((LVM_FLOAT *)pInput,
+               (LVM_FLOAT *)pStIn,
+               (LVM_INT16)(2 * NrFrames));
+#else
     if (pInData == pOutData)
     {
         /* Processing inplace */
@@ -96,14 +131,21 @@
         /* Processing outplace */
         pInput = pInData;
     }
-
+#endif
     /*
      * Call the stereo enhancer
      */
+#ifdef SUPPORT_MC
+    err = LVCS_StereoEnhancer(hInstance,              /* Instance handle */
+                              pStIn,                  /* Pointer to the input data */
+                              pOutData,               /* Pointer to the output data */
+                              NrFrames);              /* Number of frames to process */
+#else
     err = LVCS_StereoEnhancer(hInstance,              /* Instance handle */
                               pInData,                    /* Pointer to the input data */
                               pOutData,                   /* Pointer to the output data */
                               NumSamples);                /* Number of samples to process */
+#endif
 
     /*
      * Call the reverb generator
@@ -112,7 +154,7 @@
                                pOutData,                  /* Pointer to the input data */
                                pOutData,                  /* Pointer to the output data */
                                NumSamples);               /* Number of samples to process */
-    
+
     /*
      * Call the equaliser
      */
@@ -239,7 +281,15 @@
 
     LVCS_Instance_t *pInstance = (LVCS_Instance_t  *)hInstance;
     LVCS_ReturnStatus_en err;
-
+#ifdef SUPPORT_MC
+    /* Extract the number of channels */
+    LVM_INT32 channels = pInstance->Params.NrChannels;
+#define NrFrames NumSamples  // alias for clarity
+    if (channels == 1)
+    {
+        channels = 2;
+    }
+#endif
     /*
      * Check the number of samples is not too large
      */
@@ -260,8 +310,8 @@
                                   pInData,
                                   pOutData,
                                   NumSamples);
-            
-            
+
+
         /*
          * Compress to reduce expansion effect of Concert Sound and correct volume
          * differences for difference settings. Not applied in test modes
@@ -376,17 +426,32 @@
                             (LVM_INT16)NumSamples);
             }
         }
+#ifdef SUPPORT_MC
+        Copy_Float_Stereo_Mc(pInData,
+                             pOutData,
+                             NrFrames,
+                             channels);
+#endif
     }
     else
     {
         if (pInData != pOutData)
         {
+#ifdef SUPPORT_MC
+            /*
+             * The algorithm is disabled so just copy the data
+             */
+            Copy_Float((LVM_FLOAT *)pInData,               /* Source */
+                       (LVM_FLOAT *)pOutData,                  /* Destination */
+                       (LVM_INT16)(channels * NrFrames));    /* All Channels*/
+#else
             /*
              * The algorithm is disabled so just copy the data
              */
             Copy_Float((LVM_FLOAT *)pInData,               /* Source */
                        (LVM_FLOAT *)pOutData,                  /* Destination */
                        (LVM_INT16)(2 * NumSamples));             /* Left and right */
+#endif
         }
     }
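
Copy_Float_Mc_Stereo and Copy_Float_Stereo_Mc, used above, pull the first two channels out of the interleaved multichannel input and write the stereo result back into the multichannel output (the remaining channels are presumably carried over from the input). A hypothetical pair of helpers with the same intent, not the library's actual implementations or signatures, looks like this:

    /* Hypothetical sketch of the copies above. Buffers are interleaved:
     * sample (frame f, channel c) lives at index f * nch + c. */
    static void extract_stereo_from_mc(const float *in, float *out,
                                       int nr_frames, int nch)
    {
        for (int f = 0; f < nr_frames; f++) {
            out[2 * f]     = in[f * nch];      /* left  */
            out[2 * f + 1] = in[f * nch + 1];  /* right */
        }
    }

    static void merge_stereo_into_mc(const float *stereo, const float *in,
                                     float *out, int nr_frames, int nch)
    {
        for (int f = 0; f < nr_frames; f++) {
            out[f * nch]     = stereo[2 * f];
            out[f * nch + 1] = stereo[2 * f + 1];
            for (int c = 2; c < nch; c++)      /* pass the rest through */
                out[f * nch + c] = in[f * nch + c];
        }
    }
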
 
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
index e154e29..0765764 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Tables.c
@@ -515,16 +515,16 @@
 
 #if defined(BUILD_FLOAT) && defined(HIGHER_FS)
 const LVM_INT16 LVCS_VolumeTCTable[11] = {LVCS_VOL_TC_Fs8000,
-		                                  LVCS_VOL_TC_Fs11025,
-										  LVCS_VOL_TC_Fs12000,
-										  LVCS_VOL_TC_Fs16000,
-										  LVCS_VOL_TC_Fs22050,
-										  LVCS_VOL_TC_Fs24000,
-										  LVCS_VOL_TC_Fs32000,
-										  LVCS_VOL_TC_Fs44100,
-										  LVCS_VOL_TC_Fs48000,
-										  LVCS_VOL_TC_Fs96000,
-										  LVCS_VOL_TC_Fs192000
+                                          LVCS_VOL_TC_Fs11025,
+                                          LVCS_VOL_TC_Fs12000,
+                                          LVCS_VOL_TC_Fs16000,
+                                          LVCS_VOL_TC_Fs22050,
+                                          LVCS_VOL_TC_Fs24000,
+                                          LVCS_VOL_TC_Fs32000,
+                                          LVCS_VOL_TC_Fs44100,
+                                          LVCS_VOL_TC_Fs48000,
+                                          LVCS_VOL_TC_Fs96000,
+                                          LVCS_VOL_TC_Fs192000
 };
 #else
 const LVM_INT16 LVCS_VolumeTCTable[9] = {LVCS_VOL_TC_Fs8000,
@@ -546,16 +546,16 @@
 /************************************************************************************/
 #if defined(BUILD_FLOAT) && defined(HIGHER_FS)
 const LVM_INT32   LVCS_SampleRateTable[11] = {8000,
-		                                      11025,
-											  12000,
-											  16000,
-											  22050,
-											  24000,
-											  32000,
-											  44100,
-											  48000,
-											  96000,
-											  192000
+                                              11025,
+                                              12000,
+                                              16000,
+                                              22050,
+                                              24000,
+                                              32000,
+                                              44100,
+                                              48000,
+                                              96000,
+                                              192000
 };
 #else
 const LVM_INT16   LVCS_SampleRateTable[9] = {8000,
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
new file mode 100644
index 0000000..8ee807c
--- /dev/null
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -0,0 +1,46 @@
+// Build the unit tests for effects
+
+cc_test {
+    name: "lvmtest",
+    host_supported: false,
+    proprietary: true,
+
+    include_dirs: [
+        "frameworks/av/media/libeffects/lvm/lib/Bass/lib",
+        "frameworks/av/media/libeffects/lvm/lib/Bass/src",
+        "frameworks/av/media/libeffects/lvm/lib/Bundle/src",
+        "frameworks/av/media/libeffects/lvm/lib/Common/src",
+        "frameworks/av/media/libeffects/lvm/lib/Eq/lib",
+        "frameworks/av/media/libeffects/lvm/lib/Eq/src",
+        "frameworks/av/media/libeffects/lvm/lib/SpectrumAnalyzer/lib",
+        "frameworks/av/media/libeffects/lvm/lib/SpectrumAnalyzer/src",
+        "frameworks/av/media/libeffects/lvm/lib/StereoWidening/lib",
+        "frameworks/av/media/libeffects/lvm/lib/StereoWidening/src",
+        "frameworks/av/media/libeffects/lvm/wrapper/Bundle",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+
+    shared_libs: [
+        "libaudioutils",
+        "liblog",
+    ],
+
+    static_libs: [
+        "libmusicbundle",
+    ],
+
+    srcs: ["lvmtest.cpp"],
+
+    cflags: [
+        "-DBUILD_FLOAT",
+        "-DHIGHER_FS",
+        "-DSUPPORT_MC",
+
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..340469a
--- /dev/null
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Run tests in this directory.
+#
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+mm -j
+
+echo "waiting for device"
+
+adb root && adb wait-for-device remount
+
+# location of test files
+testdir="/data/local/tmp/lvmTest"
+
+#flags="-bE -tE -eqE -csE"
+flags="-csE -tE -eqE"
+
+
+echo "========================================"
+echo "testing lvm"
+adb shell mkdir $testdir
+adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw $testdir
+adb push $OUT/testcases/lvmtest/arm64/lvmtest $testdir
+
+# run multichannel effects at different channel counts, saving only the stereo channel pair.
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_1.raw\
+                          -ch:1 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_2.raw\
+                           -ch:2 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_4.raw\
+                           -ch:4 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_6.raw\
+                           -ch:6 -fs:44100 $flags
+adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_8.raw\
+                           -ch:8 -fs:44100 $flags
+
+# two channel files should be identical to higher channel computation (first 2 channels).
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_2.raw
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_4.raw
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_6.raw
+adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_8.raw
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
new file mode 100644
index 0000000..01c5955
--- /dev/null
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -0,0 +1,682 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <assert.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
+#include <log/log.h>
+
+#include "EffectBundle.h"
+#include "LVM_Private.h"
+
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) \
+  do {               \
+  } while (false)
+#endif
+
+#define CHECK_ARG(cond)                                \
+  {                                                    \
+    if (!(cond)) {                                     \
+      ALOGE("\tLVM_ERROR : Invalid argument: " #cond); \
+      return -EINVAL;                                  \
+    }                                                  \
+  \
+}
+
+#define LVM_ERROR_CHECK(LvmStatus, callingFunc, calledFunc)     \
+  {                                                             \
+    if ((LvmStatus) == LVM_NULLADDRESS) {                       \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "null pointer returned by %s in %s\n\n\n\n",          \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+    if ((LvmStatus) == LVM_ALIGNMENTERROR) {                    \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "bad alignment returned by %s in %s\n\n\n\n",         \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+    if ((LvmStatus) == LVM_INVALIDNUMSAMPLES) {                 \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "bad number of samples returned by %s in %s\n\n\n\n", \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+    if ((LvmStatus) == LVM_OUTOFRANGE) {                        \
+      ALOGE(                                                    \
+          "\tLVM_ERROR : Parameter error - "                    \
+          "out of range returned by %s in %s\n",                \
+          callingFunc, calledFunc);                             \
+    }                                                           \
+  }
+
+struct lvmConfigParams_t {
+  int              samplingFreq    = 44100;
+  int              nrChannels      = 2;
+  int              fChannels       = 2;
+  int              bassEffectLevel = 0;
+  int              eqPresetLevel   = 0;
+  int              frameLength     = 256;
+  LVM_BE_Mode_en   bassEnable      = LVM_BE_OFF;
+  LVM_TE_Mode_en   trebleEnable    = LVM_TE_OFF;
+  LVM_EQNB_Mode_en eqEnable        = LVM_EQNB_OFF;
+  LVM_Mode_en      csEnable        = LVM_MODE_OFF;
+};
+
+void printUsage() {
+  printf("\nUsage: ");
+  printf("\n     <exceutable> -i:<input_file> -o:<out_file> [options]\n");
+  printf("\nwhere, \n     <inputfile>  is the input file name");
+  printf("\n                  on which LVM effects are applied");
+  printf("\n     <outputfile> processed output file");
+  printf("\n     and options are mentioned below");
+  printf("\n");
+  printf("\n     -help (or) -h");
+  printf("\n           Prints this usage information");
+  printf("\n");
+  printf("\n     -ch:<process_channels> (1 through 8)\n\n");
+  printf("\n     -fch:<file_channels> (1 through 8)\n\n");
+  printf("\n     -basslvl:<effect_level>");
+  printf("\n           A value that ranges between 0 - 15 default 0");
+  printf("\n");
+  printf("\n     -eqPreset:<preset Value>");
+  printf("\n           0 - Normal");
+  printf("\n           1 - Classical");
+  printf("\n           2 - Dance");
+  printf("\n           3 - Flat");
+  printf("\n           4 - Folk");
+  printf("\n           5 - Heavy Metal");
+  printf("\n           6 - Hip Hop");
+  printf("\n           7 - Jazz");
+  printf("\n           8 - Pop");
+  printf("\n           9 - Rock");
+  printf("\n           default 0");
+  printf("\n     -bE ");
+  printf("\n           Enable Dynamic Bass Enhancement");
+  printf("\n");
+  printf("\n     -tE ");
+  printf("\n           Enable Treble Boost");
+  printf("\n");
+  printf("\n     -csE ");
+  printf("\n           Enable Concert Surround");
+  printf("\n");
+  printf("\n     -eqE ");
+  printf("\n           Enable Equalizer");
+}
+
+//----------------------------------------------------------------------------
+// LvmEffect_free()
+//----------------------------------------------------------------------------
+// Purpose: Free all memory associated with the Bundle.
+//
+// Inputs:
+//  pContext:   effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void LvmEffect_free(struct EffectContext *pContext) {
+  LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
+  LVM_MemTab_t MemTab;
+
+  /* Free the algorithm memory */
+  LvmStatus = LVM_GetMemoryTable(pContext->pBundledContext->hInstance, &MemTab,
+                                 LVM_NULL);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmEffect_free")
+
+  for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
+    if (MemTab.Region[i].Size != 0) {
+      if (MemTab.Region[i].pBaseAddress != NULL) {
+        ALOGV("\tLvmEffect_free - START freeing %" PRIu32
+              " bytes for region %u at %p\n",
+              MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+
+        free(MemTab.Region[i].pBaseAddress);
+
+        ALOGV("\tLvmEffect_free - END   freeing %" PRIu32
+              " bytes for region %u at %p\n",
+              MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+      } else {
+        ALOGE(
+            "\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer "
+            "%" PRIu32 " bytes for region %u at %p ERROR\n",
+            MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+      }
+    }
+  }
+} /* end LvmEffect_free */
+
+//----------------------------------------------------------------------------
+// LvmBundle_init()
+//----------------------------------------------------------------------------
+// Purpose: Initialize engine with default configuration, creates instance
+// with all effects disabled.
+//
+// Inputs:
+//  pContext:   effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int LvmBundle_init(struct EffectContext *pContext, LVM_ControlParams_t *params) {
+  ALOGV("\tLvmBundle_init start");
+
+  pContext->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+  pContext->config.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+  pContext->config.inputCfg.format = EFFECT_BUFFER_FORMAT;
+  pContext->config.inputCfg.samplingRate = 44100;
+  pContext->config.inputCfg.bufferProvider.getBuffer = NULL;
+  pContext->config.inputCfg.bufferProvider.releaseBuffer = NULL;
+  pContext->config.inputCfg.bufferProvider.cookie = NULL;
+  pContext->config.inputCfg.mask = EFFECT_CONFIG_ALL;
+  pContext->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+  pContext->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+  pContext->config.outputCfg.format = EFFECT_BUFFER_FORMAT;
+  pContext->config.outputCfg.samplingRate = 44100;
+  pContext->config.outputCfg.bufferProvider.getBuffer = NULL;
+  pContext->config.outputCfg.bufferProvider.releaseBuffer = NULL;
+  pContext->config.outputCfg.bufferProvider.cookie = NULL;
+  pContext->config.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+  if (pContext->pBundledContext->hInstance != NULL) {
+    ALOGV(
+        "\tLvmBundle_init pContext->pBundledContext->hInstance != NULL "
+        "-> Calling LvmEffect_free()");
+
+    LvmEffect_free(pContext);
+
+    ALOGV(
+        "\tLvmBundle_init pContext->pBundledContext->hInstance != NULL "
+        "-> Called LvmEffect_free()");
+  }
+
+  LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
+  LVM_InstParams_t InstParams;                 /* Instance parameters */
+  LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS];  /* Equaliser band definitions */
+  LVM_HeadroomParams_t HeadroomParams;         /* Headroom parameters */
+  LVM_HeadroomBandDef_t HeadroomBandDef[LVM_HEADROOM_MAX_NBANDS];
+  LVM_MemTab_t MemTab; /* Memory allocation table */
+  bool bMallocFailure = LVM_FALSE;
+
+  /* Set the capabilities */
+  InstParams.BufferMode = LVM_UNMANAGED_BUFFERS;
+  InstParams.MaxBlockSize = MAX_CALL_SIZE;
+  InstParams.EQNB_NumBands = MAX_NUM_BANDS;
+  InstParams.PSA_Included = LVM_PSA_ON;
+
+  /* Allocate memory, forcing alignment */
+  LvmStatus = LVM_GetMemoryTable(LVM_NULL, &MemTab, &InstParams);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV("\tCreateInstance Successfully called LVM_GetMemoryTable\n");
+
+  /* Allocate memory */
+  for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
+    if (MemTab.Region[i].Size != 0) {
+      MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
+
+      if (MemTab.Region[i].pBaseAddress == LVM_NULL) {
+        ALOGE(
+            "\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate "
+            "%" PRIu32 " bytes for region %u\n",
+            MemTab.Region[i].Size, i);
+        bMallocFailure = LVM_TRUE;
+        break;
+      } else {
+        ALOGV("\tLvmBundle_init CreateInstance allocated %" PRIu32
+              " bytes for region %u at %p\n",
+              MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+      }
+    }
+  }
+
+  /* If one or more of the memory regions failed to allocate, free the
+   * regions that were successfully allocated and return with an error.
+   */
+  if (bMallocFailure == LVM_TRUE) {
+    for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
+      if (MemTab.Region[i].pBaseAddress == LVM_NULL) {
+        ALOGE(
+            "\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate "
+            "%" PRIu32 " bytes for region %u Not freeing\n",
+            MemTab.Region[i].Size, i);
+      } else {
+        ALOGE(
+            "\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated "
+            "%" PRIu32 " bytes for region %u at %p- free\n",
+            MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
+        free(MemTab.Region[i].pBaseAddress);
+      }
+    }
+    return -EINVAL;
+  }
+  ALOGV("\tLvmBundle_init CreateInstance Successfully malloc'd memory\n");
+
+  /* Initialise */
+  pContext->pBundledContext->hInstance = LVM_NULL;
+
+  /* Init sets the instance handle */
+  LvmStatus = LVM_GetInstanceHandle(&pContext->pBundledContext->hInstance,
+                                    &MemTab, &InstParams);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_GetInstanceHandle", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV(
+      "\tLvmBundle_init CreateInstance Successfully called "
+      "LVM_GetInstanceHandle\n");
+
+  /* Set the initial process parameters */
+  /* General parameters */
+  params->OperatingMode = LVM_MODE_ON;
+  params->SampleRate = LVM_FS_44100;
+  params->SourceFormat = LVM_STEREO;
+  params->SpeakerType = LVM_HEADPHONES;
+
+  pContext->pBundledContext->SampleRate = LVM_FS_44100;
+
+  /* Concert Sound parameters */
+  params->VirtualizerOperatingMode = LVM_MODE_OFF;
+  params->VirtualizerType = LVM_CONCERTSOUND;
+  params->VirtualizerReverbLevel = 100;
+  params->CS_EffectLevel = LVM_CS_EFFECT_NONE;
+
+  /* N-Band Equaliser parameters */
+  params->EQNB_OperatingMode = LVM_EQNB_ON;
+  params->EQNB_NBands = FIVEBAND_NUMBANDS;
+  params->pEQNB_BandDefinition = &BandDefs[0];
+
+  for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+    BandDefs[i].Frequency = EQNB_5BandPresetsFrequencies[i];
+    BandDefs[i].QFactor = EQNB_5BandPresetsQFactors[i];
+    BandDefs[i].Gain = EQNB_5BandSoftPresets[i];
+  }
+
+  /* Volume Control parameters */
+  params->VC_EffectLevel = 0;
+  params->VC_Balance = 0;
+
+  /* Treble Enhancement parameters */
+  params->TE_OperatingMode = LVM_TE_OFF;
+  params->TE_EffectLevel = 0;
+
+  /* PSA Control parameters */
+  params->PSA_Enable = LVM_PSA_OFF;
+  params->PSA_PeakDecayRate = (LVM_PSA_DecaySpeed_en)0;
+
+  /* Bass Enhancement parameters */
+  params->BE_OperatingMode = LVM_BE_ON;
+  params->BE_EffectLevel = 0;
+  params->BE_CentreFreq = LVM_BE_CENTRE_90Hz;
+  params->BE_HPF = LVM_BE_HPF_ON;
+
+  /* PSA Control parameters */
+  params->PSA_Enable = LVM_PSA_OFF;
+  params->PSA_PeakDecayRate = LVM_PSA_SPEED_MEDIUM;
+
+  /* TE Control parameters */
+  params->TE_OperatingMode = LVM_TE_OFF;
+  params->TE_EffectLevel = 0;
+
+  /* Activate the initial settings */
+  LvmStatus =
+      LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV(
+      "\tLvmBundle_init CreateInstance Successfully called "
+      "LVM_SetControlParameters\n");
+
+  /* Set the headroom parameters */
+  HeadroomBandDef[0].Limit_Low = 20;
+  HeadroomBandDef[0].Limit_High = 4999;
+  HeadroomBandDef[0].Headroom_Offset = 0;
+  HeadroomBandDef[1].Limit_Low = 5000;
+  HeadroomBandDef[1].Limit_High = 24000;
+  HeadroomBandDef[1].Headroom_Offset = 0;
+  HeadroomParams.pHeadroomDefinition = &HeadroomBandDef[0];
+  HeadroomParams.Headroom_OperatingMode = LVM_HEADROOM_ON;
+  HeadroomParams.NHeadroomBands = 2;
+
+  LvmStatus = LVM_SetHeadroomParams(pContext->pBundledContext->hInstance,
+                                    &HeadroomParams);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_SetHeadroomParams", "LvmBundle_init");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  ALOGV(
+      "\tLvmBundle_init CreateInstance Successfully called "
+      "LVM_SetHeadroomParams\n");
+  ALOGV("\tLvmBundle_init End");
+  return 0;
+} /* end LvmBundle_init */
+
+int lvmCreate(struct EffectContext *pContext,
+              lvmConfigParams_t    *plvmConfigParams,
+              LVM_ControlParams_t  *params) {
+  int ret = 0;
+  pContext->pBundledContext =
+      (BundledEffectContext *)malloc(sizeof(struct BundledEffectContext));
+  if (NULL == pContext->pBundledContext) {
+    return -EINVAL;
+  }
+
+  pContext->pBundledContext->SessionNo = 0;
+  pContext->pBundledContext->SessionId = 0;
+  pContext->pBundledContext->hInstance = NULL;
+  pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
+  pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
+  pContext->pBundledContext->bBassEnabled = LVM_FALSE;
+  pContext->pBundledContext->bBassTempDisabled = LVM_FALSE;
+  pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
+  pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
+  pContext->pBundledContext->nOutputDevice = AUDIO_DEVICE_NONE;
+  pContext->pBundledContext->nVirtualizerForcedDevice = AUDIO_DEVICE_NONE;
+  pContext->pBundledContext->NumberEffectsEnabled = 0;
+  pContext->pBundledContext->NumberEffectsCalled = 0;
+  pContext->pBundledContext->firstVolume = LVM_TRUE;
+  pContext->pBundledContext->volume = 0;
+
+  /* The saved strength is used so that a get returns the exact strength that
+   * was used in the corresponding set: the original strength range of 0:1000
+   * is mapped to 1:15, so without the saved value the returned strength would
+   * show a quantisation-like effect.
+   */
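+  /* Illustrative arithmetic (not part of the algorithm itself): with 15 LVM
+   * levels covering strengths 0..1000, each level spans roughly 67 strength
+   * units, so e.g. strengths 500 and 530 land on the same level; returning
+   * the saved value keeps get() identical to what was passed to set().
+   */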
+  pContext->pBundledContext->BassStrengthSaved = 0;
+  pContext->pBundledContext->VirtStrengthSaved = 0;
+  pContext->pBundledContext->CurPreset = PRESET_CUSTOM;
+  pContext->pBundledContext->levelSaved = 0;
+  pContext->pBundledContext->bMuteEnabled = LVM_FALSE;
+  pContext->pBundledContext->bStereoPositionEnabled = LVM_FALSE;
+  pContext->pBundledContext->positionSaved = 0;
+  pContext->pBundledContext->workBuffer = NULL;
+  pContext->pBundledContext->frameCount = -1;
+  pContext->pBundledContext->SamplesToExitCountVirt = 0;
+  pContext->pBundledContext->SamplesToExitCountBb = 0;
+  pContext->pBundledContext->SamplesToExitCountEq = 0;
+#if defined(BUILD_FLOAT) && !defined(NATIVE_FLOAT_BUFFER)
+  pContext->pBundledContext->pInputBuffer = NULL;
+  pContext->pBundledContext->pOutputBuffer = NULL;
+#endif
+  for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+    pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
+  }
+  pContext->config.inputCfg.channels = plvmConfigParams->nrChannels;
+  ALOGV("\tlvmCreate - Calling LvmBundle_init");
+  ret = LvmBundle_init(pContext, params);
+
+  if (ret < 0) {
+    ALOGE("\tLVM_ERROR : lvmCreate() Bundle init failed");
+    return ret;
+  }
+  return 0;
+}
+
+int lvmControl(struct EffectContext *pContext,
+               lvmConfigParams_t    *plvmConfigParams,
+               LVM_ControlParams_t  *params) {
+  LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
+  LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS];  /* Equaliser band definitions */
+  int eqPresetLevel = plvmConfigParams->eqPresetLevel;
+  int nrChannels = plvmConfigParams->nrChannels;
+  params->NrChannels = nrChannels;
+
+  /* Set the initial process parameters */
+  /* General parameters */
+  params->OperatingMode = LVM_MODE_ON;
+  params->SampleRate = LVM_FS_44100;
+  params->SourceFormat = LVM_STEREO;
+  params->SpeakerType = LVM_HEADPHONES;
+
+  pContext->pBundledContext->SampleRate = LVM_FS_44100;
+
+  /* Concert Sound parameters */
+  params->VirtualizerOperatingMode = plvmConfigParams->csEnable;
+  params->VirtualizerType = LVM_CONCERTSOUND;
+  params->VirtualizerReverbLevel = 100;
+  params->CS_EffectLevel = LVM_CS_EFFECT_NONE;
+
+  /* N-Band Equaliser parameters */
+  params->EQNB_OperatingMode = plvmConfigParams->eqEnable;
+  params->pEQNB_BandDefinition = &BandDefs[0];
+  for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+    BandDefs[i].Frequency = EQNB_5BandPresetsFrequencies[i];
+    BandDefs[i].QFactor = EQNB_5BandPresetsQFactors[i];
+    BandDefs[i].Gain =
+        EQNB_5BandSoftPresets[(FIVEBAND_NUMBANDS * eqPresetLevel) + i];
+  }
+
+  /* Volume Control parameters */
+  params->VC_EffectLevel = 0;
+  params->VC_Balance = 0;
+
+  /* Treble Enhancement parameters */
+  params->TE_OperatingMode = plvmConfigParams->trebleEnable;
+
+  /* PSA Control parameters */
+  params->PSA_Enable = LVM_PSA_ON;
+
+  /* Bass Enhancement parameters */
+  params->BE_OperatingMode = plvmConfigParams->bassEnable;
+
+  if (nrChannels == 1) {
+    params->SourceFormat = LVM_MONO;
+  }
+  if (nrChannels == 2) {
+    params->SourceFormat = LVM_STEREO;
+  }
+  if ((nrChannels > 2) && (nrChannels <= 8)) {
+    params->SourceFormat = LVM_MULTICHANNEL;
+  }
+
+  /* Activate the initial settings */
+  LvmStatus =
+      LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
+
+  LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "lvmControl");
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  LvmStatus = LVM_ApplyNewSettings(pContext->pBundledContext->hInstance);
+
+  if (LvmStatus != LVM_SUCCESS) return -EINVAL;
+
+  return 0;
+}
+
+int lvmExecute(float *floatIn, float *floatOut, struct EffectContext *pContext,
+               lvmConfigParams_t *plvmConfigParams) {
+  const int frameLength = plvmConfigParams->frameLength;
+  return
+      LVM_Process(pContext->pBundledContext->hInstance, /* Instance handle */
+                  floatIn,                              /* Input buffer */
+                  floatOut,                             /* Output buffer */
+                  (LVM_UINT16)frameLength, /* Number of samples to read */
+                  0);                      /* Audio Time */
+}
+
+int lvmMainProcess(lvmConfigParams_t *plvmConfigParams, FILE *finp, FILE *fout) {
+  struct EffectContext context;
+  LVM_ControlParams_t params;
+
+  int errCode = lvmCreate(&context, plvmConfigParams, &params);
+  if (errCode) {
+    ALOGE("Error: lvmCreate returned with %d\n", errCode);
+    return errCode;
+  }
+
+  errCode = lvmControl(&context, plvmConfigParams, &params);
+  if (errCode) {
+    ALOGE("Error: lvmControl returned with %d\n", errCode);
+    return errCode;
+  }
+
+  const int channelCount = plvmConfigParams->nrChannels;
+  const int frameLength = plvmConfigParams->frameLength;
+  const int frameSize = channelCount * sizeof(float);  // processing size
+  const int ioChannelCount = plvmConfigParams->fChannels;
+  const int ioFrameSize = ioChannelCount * sizeof(short); // file load size
+  const int maxChannelCount = std::max(channelCount, ioChannelCount);
+  /*
+   * Mono input will be converted to 2 channels internally in the process call
+   * by copying the same data into the second channel.
+   * Hence when channelCount is 1, output buffer should be allocated for
+   * 2 channels. The memAllocChCount takes care of allocation of sufficient
+   * memory for the output buffer.
+   */
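+  /* Example: with channelCount == 1, floatIn holds frameLength * 1 samples
+   * while floatOut is sized for frameLength * 2 samples, because the process
+   * call writes two channels for mono input.
+   */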
+  const int memAllocChCount = (channelCount == 1 ? 2 : channelCount);
+
+  std::vector<short> in(frameLength * maxChannelCount);
+  std::vector<short> out(frameLength * maxChannelCount);
+  std::vector<float> floatIn(frameLength * channelCount);
+  std::vector<float> floatOut(frameLength * memAllocChCount);
+
+  int frameCounter = 0;
+  while (fread(in.data(), ioFrameSize, frameLength, finp) == (size_t)frameLength) {
+    if (ioChannelCount != channelCount) {
+        adjust_channels(in.data(), ioChannelCount, in.data(), channelCount,
+               sizeof(short), frameLength * ioFrameSize);
+    }
+    memcpy_to_float_from_i16(floatIn.data(), in.data(), frameLength * channelCount);
+
+#if 1
+    errCode = lvmExecute(floatIn.data(), floatOut.data(), &context, plvmConfigParams);
+    if (errCode) {
+      printf("\nError: lvmExecute returned with %d\n", errCode);
+      return errCode;
+    }
+
+    (void)frameSize; // eliminate warning
+#else
+    memcpy(floatOut.data(), floatIn.data(), frameLength * frameSize);
+#endif
+    memcpy_to_i16_from_float(out.data(), floatOut.data(), frameLength * channelCount);
+    if (ioChannelCount != channelCount) {
+        adjust_channels(out.data(), channelCount, out.data(), ioChannelCount,
+               sizeof(short), frameLength * channelCount * sizeof(short));
+    }
+    (void) fwrite(out.data(), ioFrameSize, frameLength, fout);
+    frameCounter += frameLength;
+  }
+  printf("frameCounter: [%d]\n", frameCounter);
+  return 0;
+}
+
+int main(int argc, const char *argv[]) {
+  if (argc == 1) {
+    printUsage();
+    return -1;
+  }
+
+  lvmConfigParams_t lvmConfigParams{}; // default initialize
+  FILE *finp = nullptr, *fout = nullptr;
+
+  for (int i = 1; i < argc; i++) {
+    printf("%s ", argv[i]);
+    if (!strncmp(argv[i], "-i:", 3)) {
+      finp = fopen(argv[i] + 3, "rb");
+    } else if (!strncmp(argv[i], "-o:", 3)) {
+      fout = fopen(argv[i] + 3, "wb");
+    } else if (!strncmp(argv[i], "-fs:", 4)) {
+      const int samplingFreq = atoi(argv[i] + 4);
+      if (samplingFreq != 8000 && samplingFreq != 11025 &&
+          samplingFreq != 12000 && samplingFreq != 16000 &&
+          samplingFreq != 22050 && samplingFreq != 24000 &&
+          samplingFreq != 32000 && samplingFreq != 44100 &&
+          samplingFreq != 48000 && samplingFreq != 96000) {
+        ALOGE("\nError: Unsupported Sampling Frequency : %d\n", samplingFreq);
+        return -1;
+      }
+      lvmConfigParams.samplingFreq = samplingFreq;
+    } else if (!strncmp(argv[i], "-ch:", 4)) {
+      const int nrChannels = atoi(argv[i] + 4);
+      if (nrChannels > 8 || nrChannels < 1) {
+        ALOGE("\nError: Unsupported number of channels : %d\n", nrChannels);
+        return -1;
+      }
+      lvmConfigParams.nrChannels = nrChannels;
+    } else if (!strncmp(argv[i], "-fch:", 5)) {
+      const int fChannels = atoi(argv[i] + 5);
+      if (fChannels > 8 || fChannels < 1) {
+        ALOGE("\nError: Unsupported number of file channels : %d\n", fChannels);
+        return -1;
+      }
+      lvmConfigParams.fChannels = fChannels;
+    } else if (!strncmp(argv[i], "-basslvl:", 9)) {
+      const int bassEffectLevel = atoi(argv[i] + 9);
+      if (bassEffectLevel > 15 || bassEffectLevel < 0) {
+        ALOGE("\nError: Unsupported Bass Effect Level : %d\n",
+               bassEffectLevel);
+        printUsage();
+        return -1;
+      }
+      lvmConfigParams.bassEffectLevel = bassEffectLevel;
+    } else if (!strncmp(argv[i], "-eqPreset:", 10)) {
+      const int eqPresetLevel = atoi(argv[i] + 10);
+      if (eqPresetLevel > 9 || eqPresetLevel < 0) {
+        ALOGE("\nError: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
+        printUsage();
+        return -1;
+      }
+      lvmConfigParams.eqPresetLevel = eqPresetLevel;
+    } else if (!strcmp(argv[i], "-bE")) {
+      lvmConfigParams.bassEnable = LVM_BE_ON;
+    } else if (!strcmp(argv[i], "-eqE")) {
+      lvmConfigParams.eqEnable = LVM_EQNB_ON;
+    } else if (!strcmp(argv[i], "-tE")) {
+      lvmConfigParams.trebleEnable = LVM_TE_ON;
+    } else if (!strcmp(argv[i], "-csE")) {
+      lvmConfigParams.csEnable = LVM_MODE_ON;
+    } else if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "-help")) {
+      printUsage();
+      return 0;
+    }
+  }
+
+  if (finp == nullptr || fout == nullptr) {
+    ALOGE("\nError: missing input/output files\n");
+    printUsage();
+    // ok not to close.
+    return -1;
+  }
+
+  const int errCode = lvmMainProcess(&lvmConfigParams, finp, fout);
+  fclose(finp);
+  fclose(fout);
+
+  if (errCode) {
+    ALOGE("Error: lvmMainProcess returned with error %d\n", errCode);
+    return -1;
+  }
+  return 0;
+}
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index 10fd970..16fa126 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -18,6 +18,7 @@
         "-fvisibility=hidden",
         "-DBUILD_FLOAT",
         "-DHIGHER_FS",
+        "-DSUPPORT_MC",
 
         "-Wall",
         "-Werror",
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 53d266a..09e9964 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -435,7 +435,7 @@
             (pSessionContext->bEqualizerInstantiated ==LVM_FALSE) &&
             (pSessionContext->bVirtualizerInstantiated==LVM_FALSE))
     {
-        #ifdef LVM_PCM
+#ifdef LVM_PCM
         if (pContext->pBundledContext->PcmInPtr != NULL) {
             fclose(pContext->pBundledContext->PcmInPtr);
             pContext->pBundledContext->PcmInPtr = NULL;
@@ -444,7 +444,7 @@
             fclose(pContext->pBundledContext->PcmOutPtr);
             pContext->pBundledContext->PcmOutPtr = NULL;
         }
-        #endif
+#endif
 
 
         // Clear the SessionIndex
@@ -751,19 +751,21 @@
 
     LVM_ReturnStatus_en     LvmStatus = LVM_SUCCESS;                /* Function call status */
     effect_buffer_t         *pOutTmp;
+    const LVM_INT32 NrChannels =
+        audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
 #ifndef NATIVE_FLOAT_BUFFER
     if (pContext->pBundledContext->pInputBuffer == nullptr ||
             pContext->pBundledContext->frameCount < frameCount) {
         free(pContext->pBundledContext->pInputBuffer);
         pContext->pBundledContext->pInputBuffer =
-                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * FCC_2);
+                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * NrChannels);
     }
 
     if (pContext->pBundledContext->pOutputBuffer == nullptr ||
             pContext->pBundledContext->frameCount < frameCount) {
         free(pContext->pBundledContext->pOutputBuffer);
         pContext->pBundledContext->pOutputBuffer =
-                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * FCC_2);
+                (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * NrChannels);
     }
 
     if (pContext->pBundledContext->pInputBuffer == nullptr ||
@@ -784,7 +786,7 @@
                 free(pContext->pBundledContext->workBuffer);
             }
             pContext->pBundledContext->workBuffer =
-                    (effect_buffer_t *)calloc(frameCount, sizeof(effect_buffer_t) * FCC_2);
+                    (effect_buffer_t *)calloc(frameCount, sizeof(effect_buffer_t) * NrChannels);
             if (pContext->pBundledContext->workBuffer == NULL) {
                 return -ENOMEM;
             }
@@ -798,13 +800,15 @@
 
 #ifdef LVM_PCM
     fwrite(pIn,
-            frameCount*sizeof(effect_buffer_t) * FCC_2, 1, pContext->pBundledContext->PcmInPtr);
+           frameCount * sizeof(effect_buffer_t) * NrChannels,
+           1,
+           pContext->pBundledContext->PcmInPtr);
     fflush(pContext->pBundledContext->PcmInPtr);
 #endif
 
 #ifndef NATIVE_FLOAT_BUFFER
     /* Converting input data from fixed point to float point */
-    memcpy_to_float_from_i16(pInputBuff, pIn, frameCount * FCC_2);
+    memcpy_to_float_from_i16(pInputBuff, pIn, frameCount * NrChannels);
 
     /* Process the samples */
     LvmStatus = LVM_Process(pContext->pBundledContext->hInstance, /* Instance handle */
@@ -814,7 +818,7 @@
                             0);                                   /* Audio Time */
 
     /* Converting output data from float point to fixed point */
-    memcpy_to_i16_from_float(pOutTmp, pOutputBuff, frameCount * FCC_2);
+    memcpy_to_i16_from_float(pOutTmp, pOutputBuff, frameCount * NrChannels);
 
 #else
     /* Process the samples */
@@ -829,12 +833,14 @@
 
 #ifdef LVM_PCM
     fwrite(pOutTmp,
-            frameCount*sizeof(effect_buffer_t) * FCC_2, 1, pContext->pBundledContext->PcmOutPtr);
+           frameCount * sizeof(effect_buffer_t) * NrChannels,
+           1,
+           pContext->pBundledContext->PcmOutPtr);
     fflush(pContext->pBundledContext->PcmOutPtr);
 #endif
 
     if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
-        for (int i = 0; i < frameCount * FCC_2; i++) {
+        for (int i = 0; i < frameCount * NrChannels; i++) {
 #ifndef NATIVE_FLOAT_BUFFER
             pOut[i] = clamp16((LVM_INT32)pOut[i] + (LVM_INT32)pOutTmp[i]);
 #else
@@ -1232,45 +1238,50 @@
     CHECK_ARG(pConfig->inputCfg.samplingRate == pConfig->outputCfg.samplingRate);
     CHECK_ARG(pConfig->inputCfg.channels == pConfig->outputCfg.channels);
     CHECK_ARG(pConfig->inputCfg.format == pConfig->outputCfg.format);
+#ifdef SUPPORT_MC
+    CHECK_ARG(audio_channel_count_from_out_mask(pConfig->inputCfg.channels) <= LVM_MAX_CHANNELS);
+#else
     CHECK_ARG(pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO);
+#endif
     CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
               || pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
     CHECK_ARG(pConfig->inputCfg.format == EFFECT_BUFFER_FORMAT);
     pContext->config = *pConfig;
+    const LVM_INT16 NrChannels = audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
 
     switch (pConfig->inputCfg.samplingRate) {
     case 8000:
         SampleRate = LVM_FS_8000;
-        pContext->pBundledContext->SamplesPerSecond = 8000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 8000 * NrChannels;
         break;
     case 16000:
         SampleRate = LVM_FS_16000;
-        pContext->pBundledContext->SamplesPerSecond = 16000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 16000 * NrChannels;
         break;
     case 22050:
         SampleRate = LVM_FS_22050;
-        pContext->pBundledContext->SamplesPerSecond = 22050*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 22050 * NrChannels;
         break;
     case 32000:
         SampleRate = LVM_FS_32000;
-        pContext->pBundledContext->SamplesPerSecond = 32000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 32000 * NrChannels;
         break;
     case 44100:
         SampleRate = LVM_FS_44100;
-        pContext->pBundledContext->SamplesPerSecond = 44100*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 44100 * NrChannels;
         break;
     case 48000:
         SampleRate = LVM_FS_48000;
-        pContext->pBundledContext->SamplesPerSecond = 48000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 48000 * NrChannels;
         break;
 #if defined(BUILD_FLOAT) && defined(HIGHER_FS)
     case 96000:
         SampleRate = LVM_FS_96000;
-        pContext->pBundledContext->SamplesPerSecond = 96000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 96000 * NrChannels;
         break;
     case 192000:
         SampleRate = LVM_FS_192000;
-        pContext->pBundledContext->SamplesPerSecond = 192000*2; // 2 secs Stereo
+        pContext->pBundledContext->SamplesPerSecond = 192000 * NrChannels;
         break;
 #endif
     default:
@@ -1294,6 +1305,10 @@
 
         ActiveParams.SampleRate = SampleRate;
 
+#ifdef SUPPORT_MC
+        ActiveParams.NrChannels = NrChannels;
+#endif
+
         LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
 
         LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig")
@@ -1498,6 +1513,7 @@
     case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
     case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
     case AUDIO_DEVICE_OUT_USB_HEADSET:
+    // case AUDIO_DEVICE_OUT_USB_DEVICE:  // For USB testing of the virtualizer only.
         return 0;
     default :
         return -EINVAL;
@@ -1520,10 +1536,9 @@
 int VirtualizerIsConfigurationSupported(audio_channel_mask_t channelMask,
         audio_devices_t deviceType) {
     uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
-    if ((channelCount == 0) || (channelCount > 2)) {
+    if (channelCount < 1 || channelCount > LVM_MAX_CHANNELS) {
         return -EINVAL;
     }
-
     return VirtualizerIsDeviceSupported(deviceType);
 }
 
@@ -3216,6 +3231,7 @@
     EffectContext * pContext = (EffectContext *) self;
     int    status = 0;
     int    processStatus = 0;
+    const int NrChannels = audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
 
 //ALOGV("\tEffect_process Start : Enabled = %d     Called = %d (%8d %8d %8d)",
 //pContext->pBundledContext->NumberEffectsEnabled,pContext->pBundledContext->NumberEffectsCalled,
@@ -3246,7 +3262,7 @@
         (pContext->EffectType == LVM_BASS_BOOST)){
         //ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
         if(pContext->pBundledContext->SamplesToExitCountBb > 0){
-            pContext->pBundledContext->SamplesToExitCountBb -= outBuffer->frameCount * 2; // STEREO
+            pContext->pBundledContext->SamplesToExitCountBb -= outBuffer->frameCount * NrChannels;
             //ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountBb);
         }
@@ -3266,7 +3282,7 @@
         (pContext->EffectType == LVM_EQUALIZER)){
         //ALOGV("\tEffect_process() LVM_EQUALIZER Effect is not enabled");
         if(pContext->pBundledContext->SamplesToExitCountEq > 0){
-            pContext->pBundledContext->SamplesToExitCountEq -= outBuffer->frameCount * 2; // STEREO
+            pContext->pBundledContext->SamplesToExitCountEq -= outBuffer->frameCount * NrChannels;
             //ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountEq);
         }
@@ -3280,7 +3296,8 @@
         (pContext->EffectType == LVM_VIRTUALIZER)){
         //ALOGV("\tEffect_process() LVM_VIRTUALIZER Effect is not enabled");
         if(pContext->pBundledContext->SamplesToExitCountVirt > 0){
-            pContext->pBundledContext->SamplesToExitCountVirt -= outBuffer->frameCount * 2;// STEREO
+            pContext->pBundledContext->SamplesToExitCountVirt -=
+                outBuffer->frameCount * NrChannels;
             //ALOGV("\tEffect_process: Waiting for to turn off VIRTUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountVirt);
         }
@@ -3331,7 +3348,7 @@
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
 
         if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
-            for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+            for (size_t i = 0; i < outBuffer->frameCount * NrChannels; ++i) {
 #ifdef NATIVE_FLOAT_BUFFER
                 outBuffer->f32[i] += inBuffer->f32[i];
 #else
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index e0f5a40..08c6a50 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -48,7 +48,8 @@
     {"amrwb",  AUDIO_ENCODER_AMR_WB},
     {"aac",    AUDIO_ENCODER_AAC},
     {"heaac",  AUDIO_ENCODER_HE_AAC},
-    {"aaceld", AUDIO_ENCODER_AAC_ELD}
+    {"aaceld", AUDIO_ENCODER_AAC_ELD},
+    {"opus",   AUDIO_ENCODER_OPUS}
 };
 
 const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 514c795..fb861d7 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -233,6 +233,12 @@
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1POINT2),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1POINT4),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_HAPTIC_A),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO_HAPTIC_A),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_HAPTIC_AB),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB),
     TERMINATOR
 };
 
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index d8b0fe7..bdf1aae 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -67,7 +67,7 @@
     OUTPUT_FORMAT_AAC_ADTS = 6,
 
     OUTPUT_FORMAT_AUDIO_ONLY_END = 7, // Used in validating the output format.  Should be the
-                                      //  at the end of the audio only output formats.
+                                      // at the end of the audio only output formats.
 
     /* Stream over a socket, limited to a single stream */
     OUTPUT_FORMAT_RTP_AVP = 7,
@@ -81,6 +81,9 @@
     /* HEIC data in a HEIF container */
     OUTPUT_FORMAT_HEIF = 10,
 
+    /* Opus data in an Ogg container */
+    OUTPUT_FORMAT_OGG = 11,
+
     OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
 };
 
@@ -92,6 +95,7 @@
     AUDIO_ENCODER_HE_AAC = 4,
     AUDIO_ENCODER_AAC_ELD = 5,
     AUDIO_ENCODER_VORBIS = 6,
+    AUDIO_ENCODER_OPUS = 7,
 
     AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
 };
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index 7c2191b..a01afa3 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -439,31 +439,16 @@
 
 size_t JAudioTrack::frameSize() {
     JNIEnv *env = JavaVMHelper::getJNIEnv();
-
-    // TODO: Calculated here implementing the logic in AudioTrack.java
-    // wait for AudioTrack.java exposing this parameter (i.e. getFrameSizeInBtytes())
-    jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
-    int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
+    jmethodID jGetFormat = env->GetMethodID(mAudioTrackCls,
+            "getFormat", "()Landroid/media/AudioFormat;");
+    jobject jAudioFormatObj = env->CallObjectMethod(mAudioTrackObj, jGetFormat);
 
     jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
-    jmethodID jIsEncodingLinearFrames = env->GetStaticMethodID(
-            jAudioFormatCls, "isEncodingLinearFrames", "(I)Z");
-    jboolean javaIsEncodingLinearFrames = env->CallStaticBooleanMethod(
-            jAudioFormatCls, jIsEncodingLinearFrames, javaFormat);
+    jmethodID jGetFrameSizeInBytes = env->GetMethodID(
+            jAudioFormatCls, "getFrameSizeInBytes", "()I");
+    jint javaFrameSizeInBytes = env->CallIntMethod(jAudioFormatObj, jGetFrameSizeInBytes);
 
-    if (javaIsEncodingLinearFrames == false) {
-        return 1;
-    }
-
-    jmethodID jGetBytesPerSample = env->GetStaticMethodID(jAudioFormatCls,
-            "getBytesPerSample", "(I)I");
-    int javaBytesPerSample = env->CallStaticIntMethod(jAudioFormatCls,
-            jGetBytesPerSample, javaFormat);
-
-    jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
-    int javaChannelCount = env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
-
-    return javaChannelCount * javaBytesPerSample;
+    return (size_t)javaFrameSizeInBytes;
 }
 
 status_t JAudioTrack::dump(int fd, const Vector<String16>& args __unused) const
diff --git a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
index 2ea55f6..e53900b 100644
--- a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
@@ -273,10 +273,10 @@
 
         if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
             notify->post();
-            msg->post(delayUs > 0ll ? delayUs : 0ll);
+            msg->post(delayUs > 0LL ? delayUs : 0LL);
             return;
         } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
-            if (delayUs < -1000000ll) { // 1 second
+            if (delayUs < -1000000LL) { // 1 second
                 continue;
             }
             notify->post();
@@ -288,7 +288,7 @@
     }
 
     // try again in 1 second
-    msg->post(1000000ll);
+    msg->post(1000000LL);
 }
 
 void NuPlayer2::HTTPLiveSource2::onMessageReceived(const sp<AMessage> &msg) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 1561850..81ffbc7 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -926,7 +926,7 @@
                 }
             }
 
-            msg->post(1000000ll);  // poll again in a second.
+            msg->post(1000000LL);  // poll again in a second.
             break;
         }
 
@@ -1194,7 +1194,7 @@
             }
 
             if (rescan) {
-                msg->post(100000ll);
+                msg->post(100000LL);
                 mScanSourcesPending = true;
             }
             break;
@@ -2870,7 +2870,7 @@
             int64_t posMs;
             int64_t timeUs, posUs;
             driver->getCurrentPosition(&posMs);
-            posUs = posMs * 1000ll;
+            posUs = posMs * 1000LL;
             CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
             if (posUs < timeUs) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
index a9f2104..98c3403 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
@@ -567,7 +567,7 @@
 
         ccBuf->meta()->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, mSelectedTrack);
         ccBuf->meta()->setInt64("timeUs", timeUs);
-        ccBuf->meta()->setInt64("durationUs", 0ll);
+        ccBuf->meta()->setInt64("durationUs", 0LL);
 
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatClosedCaptionData);
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 931b86e..49e3e3b 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -71,10 +71,10 @@
       mCCDecoder(ccDecoder),
       mPid(pid),
       mUid(uid),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
-      mNumFramesTotal(0ll),
-      mNumInputFramesDropped(0ll),
-      mNumOutputFramesDropped(0ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
+      mNumFramesTotal(0LL),
+      mNumInputFramesDropped(0LL),
+      mNumOutputFramesDropped(0LL),
       mVideoWidth(0),
       mVideoHeight(0),
       mIsAudio(true),
@@ -428,10 +428,10 @@
         // TODO: For now, layer fps is calculated for some specific architectures.
         // But it really should be extracted from the stream.
         mVideoTemporalLayerAggregateFps[0] =
-            mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+            mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - 1));
         for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
             mVideoTemporalLayerAggregateFps[i] =
-                mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+                mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - i))
                 + mVideoTemporalLayerAggregateFps[i - 1];
         }
     }
@@ -952,7 +952,7 @@
 
             int32_t layerId = 0;
             bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
-            if (mRenderer->getVideoLateByUs() > 100000ll
+            if (mRenderer->getVideoLateByUs() > 100000LL
                     && mIsVideoAVC
                     && !IsAVCReferenceFrame(accessUnit)) {
                 dropAccessUnit = true;
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
index 1f1b69e..914f29f 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
@@ -122,7 +122,7 @@
         mRequestInputBuffersPending = true;
 
         sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
-        msg->post(10 * 1000ll);
+        msg->post(10 * 1000LL);
     }
 }
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
index 0e0c1d8..0514e88 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
@@ -46,7 +46,7 @@
     : DecoderBase(notify),
       mSource(source),
       mRenderer(renderer),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
       mReachedEOS(true),
       mPendingAudioErr(OK),
       mPendingBuffersToDrain(0),
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index eff8866..56d708a 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -372,7 +372,7 @@
     ALOGD("seekTo(%p) (%lld ms, %d) at state %d", this, (long long)msec, mode, mState);
     Mutex::Autolock autoLock(mLock);
 
-    int64_t seekTimeUs = msec * 1000ll;
+    int64_t seekTimeUs = msec * 1000LL;
 
     switch (mState) {
         case STATE_PREPARED:
@@ -426,7 +426,7 @@
         return UNKNOWN_ERROR;
     }
 
-    *msec = (mDurationUs + 500ll) / 1000;
+    *msec = (mDurationUs + 500LL) / 1000;
 
     return OK;
 }
@@ -612,7 +612,7 @@
             int64_t msec = 0;
             // getCurrentPosition should always return OK
             getCurrentPosition(&msec);
-            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000ll);
+            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000LL);
         }
 
         case MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK:
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index d800412..9d9e179 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -67,10 +67,10 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 10000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000LL;
 
 // Maximum allowed delay from AudioSink, 1.5 seconds.
-static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
 
 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
 
@@ -84,7 +84,7 @@
 };
 
 // static
-const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000ll;
+const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
 
 NuPlayer2::Renderer::Renderer(
         const sp<MediaPlayer2Interface::AudioSink> &sink,
@@ -108,7 +108,7 @@
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorNumFramesWritten(-1),
-      mVideoLateByUs(0ll),
+      mVideoLateByUs(0LL),
       mNextVideoTimeMediaUs(-1),
       mHasAudio(false),
       mHasVideo(false),
@@ -1142,7 +1142,7 @@
         int64_t nowUs = ALooper::GetNowUs();
         int64_t mediaUs;
         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
-            return 0ll;
+            return 0LL;
         } else {
             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
         }
@@ -1357,7 +1357,7 @@
         tooLate = false;
     }
 
-    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
+    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
     entry->mNotifyConsumed->setInt32("render", !tooLate);
     entry->mNotifyConsumed->post();
     mVideoQueue.erase(mVideoQueue.begin());
@@ -1489,7 +1489,7 @@
 
     ALOGV("queueDiff = %.2f secs", diff / 1E6);
 
-    if (diff > 100000ll) {
+    if (diff > 100000LL) {
         // Audio data starts More than 0.1 secs before video.
         // Drop some audio.
 
diff --git a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
index aed925b..a70269e 100644
--- a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
@@ -30,7 +30,7 @@
 
 namespace android {
 
-const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+const int64_t kNearEOSTimeoutUs = 2000000LL; // 2 secs
 
 // Default Buffer Underflow/Prepare/StartServer/Overflow Marks
 static const int kUnderflowMarkMs   =  1000;  // 1 second
@@ -168,7 +168,7 @@
     // We're going to buffer at least 2 secs worth data on all tracks before
     // starting playback (both at startup and after a seek).
 
-    static const int64_t kMinDurationUs = 2000000ll;
+    static const int64_t kMinDurationUs = 2000000LL;
 
     int64_t mediaDurationUs = 0;
     getDuration(&mediaDurationUs);
@@ -272,7 +272,7 @@
 }
 
 status_t NuPlayer2::RTSPSource2::getDuration(int64_t *durationUs) {
-    *durationUs = -1ll;
+    *durationUs = -1LL;
 
     int64_t audioDurationUs;
     if (mAudioTrack != NULL
@@ -321,7 +321,7 @@
 
 void NuPlayer2::RTSPSource2::schedulePollBuffering() {
     sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
-    msg->post(1000000ll); // 1 second intervals
+    msg->post(1000000LL); // 1 second intervals
 }
 
 void NuPlayer2::RTSPSource2::checkBuffering(
@@ -345,10 +345,10 @@
         int64_t maxRebufferingMarkUs;
         {
             Mutex::Autolock _l(mBufferingSettingsLock);
-            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000ll;
+            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000LL;
             // TODO: maxRebufferingMarkUs could be larger than
             // mBufferingSettings.mResumePlaybackMarkMs * 1000ll.
-            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000ll;
+            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000LL;
         }
         // isFinished when duration is 0 checks for EOS result only
         if (bufferedDurationUs > initialMarkUs
@@ -368,7 +368,7 @@
                 ++overflowCount;
             }
             int64_t startServerMarkUs =
-                    (kUnderflowMarkMs * 1000ll + maxRebufferingMarkUs) / 2;
+                    (kUnderflowMarkMs * 1000LL + maxRebufferingMarkUs) / 2;
             if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
@@ -639,7 +639,7 @@
                 int64_t nptUs =
                     ((double)rtpTime - (double)info->mRTPTime)
                         / info->mTimeScale
-                        * 1000000ll
+                        * 1000000LL
                         + info->mNormalPlaytimeUs;
 
                 accessUnit->meta()->setInt64("timeUs", nptUs);
@@ -747,7 +747,7 @@
         TrackInfo info;
         info.mTimeScale = timeScale;
         info.mRTPTime = 0;
-        info.mNormalPlaytimeUs = 0ll;
+        info.mNormalPlaytimeUs = 0LL;
         info.mNPTMappingValid = false;
 
         if ((isAudio && mAudioTrack == NULL)
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e3ae02e..eae52c2 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -46,6 +46,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/OggWriter.h>
 #include <media/stagefright/PersistentSurface.h>
 #include <media/MediaProfiles.h>
 #include <camera/CameraParameters.h>
@@ -948,6 +949,10 @@
             status = setupMPEG2TSRecording();
             break;
 
+        case OUTPUT_FORMAT_OGG:
+            status = setupOggRecording();
+            break;
+
         default:
             ALOGE("Unsupported output file format: %d", mOutputFormat);
             status = UNKNOWN_ERROR;
@@ -1013,6 +1018,7 @@
         case OUTPUT_FORMAT_AAC_ADTS:
         case OUTPUT_FORMAT_RTP_AVP:
         case OUTPUT_FORMAT_MPEG2TS:
+        case OUTPUT_FORMAT_OGG:
         {
             sp<MetaData> meta = new MetaData;
             int64_t startTimeUs = systemTime() / 1000;
@@ -1113,6 +1119,9 @@
             format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
             format->setInt32("aac-profile", OMX_AUDIO_AACObjectELD);
             break;
+        case AUDIO_ENCODER_OPUS:
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_OPUS);
+            break;
 
         default:
             ALOGE("Unknown audio encoder: %d", mAudioEncoder);
@@ -1169,6 +1178,13 @@
     return setupRawAudioRecording();
 }
 
+status_t StagefrightRecorder::setupOggRecording() {
+    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_OGG);
+
+    mWriter = new OggWriter(mOutputFd);
+    return setupRawAudioRecording();
+}
+
 status_t StagefrightRecorder::setupAMRRecording() {
     CHECK(mOutputFormat == OUTPUT_FORMAT_AMR_NB ||
           mOutputFormat == OUTPUT_FORMAT_AMR_WB);
@@ -1813,6 +1829,7 @@
         case AUDIO_ENCODER_AAC:
         case AUDIO_ENCODER_HE_AAC:
         case AUDIO_ENCODER_AAC_ELD:
+        case AUDIO_ENCODER_OPUS:
             break;
 
         default:
@@ -1863,19 +1880,18 @@
         mTotalBitRate += mVideoBitRate;
     }
 
-    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
-        // Audio source is added at the end if it exists.
-        // This help make sure that the "recoding" sound is suppressed for
-        // camcorder applications in the recorded files.
-        // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
-        // disable audio for time lapse recording
-        bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
-        if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
-            err = setupAudioEncoder(writer);
-            if (err != OK) return err;
-            mTotalBitRate += mAudioBitRate;
-        }
+    // Audio source is added at the end if it exists.
+    // This helps make sure that the "recording" sound is suppressed for
+    // camcorder applications in the recorded files.
+    // Disable audio for time lapse recording.
+    const bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
+    if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
+        err = setupAudioEncoder(writer);
+        if (err != OK) return err;
+        mTotalBitRate += mAudioBitRate;
+    }
 
+    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
         if (mCaptureFpsEnable) {
             mp4writer->setCaptureRate(mCaptureFps);
         }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index faa2e59..2ada301 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -166,6 +166,7 @@
     void setupMPEG4orWEBMMetaData(sp<MetaData> *meta);
     status_t setupAMRRecording();
     status_t setupAACRecording();
+    status_t setupOggRecording();
     status_t setupRawAudioRecording();
     status_t setupRTPRecording();
     status_t setupMPEG2TSRecording();
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index f3b69d6..e2aa8f8 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -68,7 +68,7 @@
       mVideoDataGeneration(0),
       mFetchSubtitleDataGeneration(0),
       mFetchTimedTextDataGeneration(0),
-      mDurationUs(-1ll),
+      mDurationUs(-1LL),
       mAudioIsVorbis(false),
       mIsSecure(false),
       mIsStreaming(false),
@@ -76,7 +76,7 @@
       mUID(uid),
       mMediaClock(mediaClock),
       mFd(-1),
-      mBitrate(-1ll),
+      mBitrate(-1LL),
       mPendingReadBufferTypes(0) {
     ALOGV("GenericSource");
     CHECK(mediaClock != NULL);
@@ -727,7 +727,7 @@
     }
 
     if (msg->what() == kWhatFetchSubtitleData) {
-        subTimeUs -= 1000000ll;  // send subtile data one second earlier
+        subTimeUs -= 1000000LL;  // send subtile data one second earlier
     }
     sp<AMessage> msg2 = new AMessage(sendWhat, this);
     msg2->setInt32("generation", msgGeneration);
@@ -764,7 +764,7 @@
         notify->post();
 
         if (msg->what() == kWhatSendSubtitleData) {
-            nextSubTimeUs -= 1000000ll;  // send subtile data one second earlier
+            nextSubTimeUs -= 1000000LL;  // send subtile data one second earlier
         }
         mMediaClock->addTimer(msg, nextSubTimeUs);
     }
@@ -855,7 +855,7 @@
         // TODO: maxRebufferingMarkMs could be larger than
         // mBufferingSettings.mResumePlaybackMarkMs
         int64_t restartBufferingMarkUs =
-             mBufferingSettings.mResumePlaybackMarkMs * 1000ll / 2;
+             mBufferingSettings.mResumePlaybackMarkMs * 1000LL / 2;
         if (finalResult == OK) {
             if (durationUs < restartBufferingMarkUs) {
                 postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
@@ -1446,7 +1446,7 @@
         // TODO: maxRebufferingMarkMs could be larger than
         // mBufferingSettings.mResumePlaybackMarkMs
         int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
-            : mBufferingSettings.mResumePlaybackMarkMs) * 1000ll;
+            : mBufferingSettings.mResumePlaybackMarkMs) * 1000LL;
         if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
             if (mPreparing || mSentPauseOnBuffering) {
                 Track *counterTrack =
@@ -1514,12 +1514,12 @@
     sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
     msg->setInt32("generation", mPollBufferingGeneration);
     // Enquires buffering status every second.
-    msg->post(1000000ll);
+    msg->post(1000000LL);
 }
 
 void NuPlayer::GenericSource::onPollBuffering() {
     status_t finalStatus = UNKNOWN_ERROR;
-    int64_t cachedDurationUs = -1ll;
+    int64_t cachedDurationUs = -1LL;
     ssize_t cachedDataRemaining = -1;
 
     if (mCachedSource != NULL) {
@@ -1527,15 +1527,15 @@
 
         if (finalStatus == OK) {
             off64_t size;
-            int64_t bitrate = 0ll;
+            int64_t bitrate = 0LL;
             if (mDurationUs > 0 && mCachedSource->getSize(&size) == OK) {
                 // |bitrate| uses bits/second unit, while size is number of bytes.
-                bitrate = size * 8000000ll / mDurationUs;
+                bitrate = size * 8000000LL / mDurationUs;
             } else if (mBitrate > 0) {
                 bitrate = mBitrate;
             }
             if (bitrate > 0) {
-                cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;
+                cachedDurationUs = cachedDataRemaining * 8000000LL / bitrate;
             }
         }
     }
@@ -1560,8 +1560,8 @@
         return;
     }
 
-    if (cachedDurationUs >= 0ll) {
-        if (mDurationUs > 0ll) {
+    if (cachedDurationUs >= 0LL) {
+        if (mDurationUs > 0LL) {
             int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
             int percentage = 100.0 * cachedPosUs / mDurationUs;
             if (percentage > 100) {
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 11f1bfd..77e7885 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -271,10 +271,10 @@
 
         if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
             notify->post();
-            msg->post(delayUs > 0ll ? delayUs : 0ll);
+            msg->post(delayUs > 0LL ? delayUs : 0LL);
             return;
         } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
-            if (delayUs < -1000000ll) { // 1 second
+            if (delayUs < -1000000LL) { // 1 second
                 continue;
             }
             notify->post();
@@ -286,7 +286,7 @@
     }
 
     // try again in 1 second
-    msg->post(1000000ll);
+    msg->post(1000000LL);
 }
 
 void NuPlayer::HTTPLiveSource::onMessageReceived(const sp<AMessage> &msg) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 3922767..5cf6bbd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -761,7 +761,7 @@
                 }
             }
 
-            msg->post(1000000ll);  // poll again in a second.
+            msg->post(1000000LL);  // poll again in a second.
             break;
         }
 
@@ -1049,7 +1049,7 @@
             }
 
             if (rescan) {
-                msg->post(100000ll);
+                msg->post(100000LL);
                 mScanSourcesPending = true;
             }
             break;
@@ -2670,7 +2670,7 @@
             int posMs;
             int64_t timeUs, posUs;
             driver->getCurrentPosition(&posMs);
-            posUs = (int64_t) posMs * 1000ll;
+            posUs = (int64_t) posMs * 1000LL;
             CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
             if (posUs < timeUs) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index ec30d0c..0156ad2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -554,7 +554,7 @@
 
         ccBuf->meta()->setInt32("track-index", mSelectedTrack);
         ccBuf->meta()->setInt64("timeUs", timeUs);
-        ccBuf->meta()->setInt64("durationUs", 0ll);
+        ccBuf->meta()->setInt64("durationUs", 0LL);
 
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatClosedCaptionData);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index a2ec699..df1ffde 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -71,10 +71,10 @@
       mCCDecoder(ccDecoder),
       mPid(pid),
       mUid(uid),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
-      mNumFramesTotal(0ll),
-      mNumInputFramesDropped(0ll),
-      mNumOutputFramesDropped(0ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
+      mNumFramesTotal(0LL),
+      mNumInputFramesDropped(0LL),
+      mNumOutputFramesDropped(0LL),
       mVideoWidth(0),
       mVideoHeight(0),
       mIsAudio(true),
@@ -409,10 +409,10 @@
         // TODO: For now, layer fps is calculated for some specific architectures.
         // But it really should be extracted from the stream.
         mVideoTemporalLayerAggregateFps[0] =
-            mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+            mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - 1));
         for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
             mVideoTemporalLayerAggregateFps[i] =
-                mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+                mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - i))
                 + mVideoTemporalLayerAggregateFps[i - 1];
         }
     }
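
The temporal-layer hunk above halves the total frame rate once per layer below the top and accumulates the sums. A self-contained sketch of that calculation with assumed values (3 layers at 30 fps):

    #include <cstdio>

    int main() {
        // Assumed inputs: 3 temporal layers at a 30 fps total rate.
        const int numLayers = 3;
        const float frameRateTotal = 30.0f;
        float aggregateFps[8];

        aggregateFps[0] = frameRateTotal / (float)(1LL << (numLayers - 1));
        for (int i = 1; i < numLayers; ++i) {
            aggregateFps[i] = frameRateTotal / (float)(1LL << (numLayers - i))
                    + aggregateFps[i - 1];
        }
        for (int i = 0; i < numLayers; ++i) {
            printf("layer %d: %.1f fps\n", i, aggregateFps[i]);  // 7.5, 15.0, 30.0
        }
        return 0;
    }
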
@@ -934,7 +934,7 @@
 
             int32_t layerId = 0;
             bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
-            if (mRenderer->getVideoLateByUs() > 100000ll
+            if (mRenderer->getVideoLateByUs() > 100000LL
                     && mIsVideoAVC
                     && !IsAVCReferenceFrame(accessUnit)) {
                 dropAccessUnit = true;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index d0de7b0..3e96d27 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -120,7 +120,7 @@
         mRequestInputBuffersPending = true;
 
         sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
-        msg->post(10 * 1000ll);
+        msg->post(10 * 1000LL);
     }
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 6b05b53..0997e7d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -47,7 +47,7 @@
     : DecoderBase(notify),
       mSource(source),
       mRenderer(renderer),
-      mSkipRenderingUntilMediaTimeUs(-1ll),
+      mSkipRenderingUntilMediaTimeUs(-1LL),
       mReachedEOS(true),
       mPendingAudioErr(OK),
       mPendingBuffersToDrain(0),
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 44f223d..ba3ebaa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -474,7 +474,7 @@
     ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
     Mutex::Autolock autoLock(mLock);
 
-    int64_t seekTimeUs = msec * 1000ll;
+    int64_t seekTimeUs = msec * 1000LL;
 
     switch (mState) {
         case STATE_PREPARED:
@@ -531,7 +531,7 @@
         return UNKNOWN_ERROR;
     }
 
-    *msec = (mDurationUs + 500ll) / 1000;
+    *msec = (mDurationUs + 500LL) / 1000;
 
     return OK;
 }
@@ -744,7 +744,7 @@
             int msec = 0;
             // getCurrentPosition should always return OK
             getCurrentPosition(&msec);
-            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000ll);
+            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000LL);
         }
 
         case INVOKE_ID_UNSELECT_TRACK:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index b258332..c8f6738 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -70,10 +70,10 @@
 
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 10000000ll;
+static const int64_t kOffloadPauseMaxUs = 10000000LL;
 
 // Maximum allowed delay from AudioSink, 1.5 seconds.
-static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
 
 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
 
@@ -125,7 +125,7 @@
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorNumFramesWritten(-1),
-      mVideoLateByUs(0ll),
+      mVideoLateByUs(0LL),
       mNextVideoTimeMediaUs(-1),
       mHasAudio(false),
       mHasVideo(false),
@@ -580,7 +580,7 @@
                 // play back.
                 int64_t delayUs =
                     mAudioSink->msecsPerFrame()
-                        * numFramesPendingPlayout * 1000ll;
+                        * numFramesPendingPlayout * 1000LL;
                 if (mPlaybackRate > 1.0f) {
                     delayUs /= mPlaybackRate;
                 }
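
The pending-playout branch above estimates how long the frames already written to the AudioSink will take to drain, shrinking the estimate at fast playback rates. A sketch with illustrative sink values (48 kHz output, 2400 queued frames, 2x rate), not taken from the change:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Illustrative values only.
        const float msecsPerFrame = 1000.0f / 48000.0f;      // ~0.0208 ms per PCM frame
        const int64_t numFramesPendingPlayout = 2400;
        const float playbackRate = 2.0f;

        int64_t delayUs = (int64_t)(msecsPerFrame * numFramesPendingPlayout * 1000LL);
        if (playbackRate > 1.0f) {
            delayUs /= playbackRate;                          // frames drain faster at 2x
        }
        printf("delayUs = %lld\n", (long long)delayUs);       // roughly 25,000 us
        return 0;
    }
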
@@ -1172,7 +1172,7 @@
         int64_t nowUs = ALooper::GetNowUs();
         int64_t mediaUs;
         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
-            return 0ll;
+            return 0LL;
         } else {
             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
         }
@@ -1387,7 +1387,7 @@
         tooLate = false;
     }
 
-    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
+    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
     entry->mNotifyConsumed->setInt32("render", !tooLate);
     entry->mNotifyConsumed->post();
     mVideoQueue.erase(mVideoQueue.begin());
@@ -1519,7 +1519,7 @@
 
     ALOGV("queueDiff = %.2f secs", diff / 1E6);
 
-    if (diff > 100000ll) {
+    if (diff > 100000LL) {
         // Audio data starts more than 0.1 secs before video.
         // Drop some audio.
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 851217b..bf14ec2 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -30,7 +30,7 @@
 
 namespace android {
 
-const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+const int64_t kNearEOSTimeoutUs = 2000000LL; // 2 secs
 
 // Default Buffer Underflow/Prepare/StartServer/Overflow Marks
 static const int kUnderflowMarkMs   =  1000;  // 1 second
@@ -169,7 +169,7 @@
     // We're going to buffer at least 2 secs worth of data on all tracks before
     // starting playback (both at startup and after a seek).
 
-    static const int64_t kMinDurationUs = 2000000ll;
+    static const int64_t kMinDurationUs = 2000000LL;
 
     int64_t mediaDurationUs = 0;
     getDuration(&mediaDurationUs);
@@ -273,7 +273,7 @@
 }
 
 status_t NuPlayer::RTSPSource::getDuration(int64_t *durationUs) {
-    *durationUs = -1ll;
+    *durationUs = -1LL;
 
     int64_t audioDurationUs;
     if (mAudioTrack != NULL
@@ -322,7 +322,7 @@
 
 void NuPlayer::RTSPSource::schedulePollBuffering() {
     sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
-    msg->post(1000000ll); // 1 second intervals
+    msg->post(1000000LL); // 1 second intervals
 }
 
 void NuPlayer::RTSPSource::checkBuffering(
@@ -346,10 +346,10 @@
         int64_t maxRebufferingMarkUs;
         {
             Mutex::Autolock _l(mBufferingSettingsLock);
-            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000ll;
+            initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000LL;
             // TODO: maxRebufferingMarkUs could be larger than
             // mBufferingSettings.mResumePlaybackMarkMs * 1000ll.
-            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000ll;
+            maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000LL;
         }
         // isFinished when duration is 0 checks for EOS result only
         if (bufferedDurationUs > initialMarkUs
@@ -369,7 +369,7 @@
                 ++overflowCount;
             }
             int64_t startServerMarkUs =
-                    (kUnderflowMarkMs * 1000ll + maxRebufferingMarkUs) / 2;
+                    (kUnderflowMarkMs * 1000LL + maxRebufferingMarkUs) / 2;
             if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
@@ -640,7 +640,7 @@
                 int64_t nptUs =
                     ((double)rtpTime - (double)info->mRTPTime)
                         / info->mTimeScale
-                        * 1000000ll
+                        * 1000000LL
                         + info->mNormalPlaytimeUs;
 
                 accessUnit->meta()->setInt64("timeUs", nptUs);
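
The RTSP hunk above maps an RTP timestamp onto normal play time using the track's clock rate and the RTP/NPT anchor. A worked sketch with assumed values (90 kHz clock, anchor at zero):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Assumed values: a 90 kHz RTP clock, mapping anchored at RTP time 0 == NPT 0.
        const uint32_t rtpTime = 180000;      // current packet's RTP timestamp
        const uint32_t anchorRtpTime = 0;     // info->mRTPTime
        const int32_t timeScale = 90000;      // info->mTimeScale
        const int64_t normalPlaytimeUs = 0;   // info->mNormalPlaytimeUs

        int64_t nptUs = (int64_t)(((double)rtpTime - (double)anchorRtpTime)
                                  / timeScale * 1000000LL)
                        + normalPlaytimeUs;
        printf("nptUs = %lld\n", (long long)nptUs);  // 2,000,000 us == 2 s into the stream
        return 0;
    }
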
@@ -748,7 +748,7 @@
         TrackInfo info;
         info.mTimeScale = timeScale;
         info.mRTPTime = 0;
-        info.mNormalPlaytimeUs = 0ll;
+        info.mNormalPlaytimeUs = 0LL;
         info.mNPTMappingValid = false;
 
         if ((isAudio && mAudioTrack == NULL)
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index b3da53f..afdcd37 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -186,7 +186,7 @@
     // We're going to buffer at least 2 secs worth of data on all tracks before
     // starting playback (both at startup and after a seek).
 
-    static const int64_t kMinDurationUs = 2000000ll;
+    static const int64_t kMinDurationUs = 2000000LL;
 
     sp<AnotherPacketSource> audioTrack = getSource(true /*audio*/);
     sp<AnotherPacketSource> videoTrack = getSource(false /*audio*/);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3080db5..114f492 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2237,6 +2237,15 @@
         } else {
             err = setupEAC3Codec(encoder, numChannels, sampleRate);
         }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4)) {
+        int32_t numChannels;
+        int32_t sampleRate;
+        if (!msg->findInt32("channel-count", &numChannels)
+                || !msg->findInt32("sample-rate", &sampleRate)) {
+            err = INVALID_OPERATION;
+        } else {
+            err = setupAC4Codec(encoder, numChannels, sampleRate);
+        }
     }
 
     if (err != OK) {
@@ -2348,6 +2357,17 @@
     return err;
 }
 
+status_t ACodec::setAudioPresentation(int32_t presentationId, int32_t programId) {
+    OMX_AUDIO_CONFIG_ANDROID_AUDIOPRESENTATION config;
+    InitOMXParams(&config);
+    config.nPresentationId = (OMX_S32)presentationId;
+    config.nProgramId = (OMX_S32)programId;
+    status_t err = mOMXNode->setConfig(
+            (OMX_INDEXTYPE)OMX_IndexConfigAudioPresentation,
+            &config, sizeof(config));
+    return err;
+}
+
 status_t ACodec::setPriority(int32_t priority) {
     if (priority < 0) {
         return BAD_VALUE;
@@ -2893,6 +2913,38 @@
             (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3, &def, sizeof(def));
 }
 
+status_t ACodec::setupAC4Codec(
+        bool encoder, int32_t numChannels, int32_t sampleRate) {
+    status_t err = setupRawAudioFormat(
+            encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (encoder) {
+        ALOGW("AC4 encoding is not supported.");
+        return INVALID_OPERATION;
+    }
+
+    OMX_AUDIO_PARAM_ANDROID_AC4TYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexInput;
+
+    err = mOMXNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc4, &def, sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    def.nChannels = numChannels;
+    def.nSampleRate = sampleRate;
+
+    return mOMXNode->setParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc4, &def, sizeof(def));
+}
+
 static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
         bool isAMRWB, int32_t bps) {
     if (isAMRWB) {
@@ -5246,6 +5298,25 @@
                     break;
                 }
 
+                case OMX_AUDIO_CodingAndroidAC4:
+                {
+                    OMX_AUDIO_PARAM_ANDROID_AC4TYPE params;
+                    InitOMXParams(&params);
+                    params.nPortIndex = portIndex;
+
+                    err = mOMXNode->getParameter(
+                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc4,
+                            &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
+
+                    notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC4);
+                    notify->setInt32("channel-count", params.nChannels);
+                    notify->setInt32("sample-rate", params.nSampleRate);
+                    break;
+                }
+
                 case OMX_AUDIO_CodingAndroidOPUS:
                 {
                     OMX_AUDIO_PARAM_ANDROID_OPUSTYPE params;
@@ -7392,6 +7463,18 @@
         }
     }
 
+    int32_t presentationId = -1;
+    if (params->findInt32("audio-presentation-presentation-id", &presentationId)) {
+        int32_t programId = -1;
+        params->findInt32("audio-presentation-program-id", &programId);
+        status_t err = setAudioPresentation(presentationId, programId);
+        if (err != OK) {
+            ALOGI("[%s] failed setAudioPresentation. Failure is fine since this key is optional",
+                    mComponentName.c_str());
+            err = OK;
+        }
+    }
+
     // Ignore errors as failure is expected for codecs that aren't video encoders.
     (void)configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
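
The ACodec hunk above only consumes the presentation keys; a hypothetical client-side sketch of how they could be delivered follows. The MediaCodec::setParameters() route and the helper name are assumptions, not part of this change:

    #include <cstdint>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/AMessage.h>

    using android::sp;
    using android::AMessage;
    using android::MediaCodec;

    // Hypothetical helper: builds the parameter message that the ACodec change reads.
    void selectAudioPresentation(const sp<MediaCodec> &codec,
                                 int32_t presentationId, int32_t programId) {
        sp<AMessage> params = new AMessage;
        params->setInt32("audio-presentation-presentation-id", presentationId);
        params->setInt32("audio-presentation-program-id", programId);
        codec->setParameters(params);  // eventually reaches ACodec::setAudioPresentation()
    }
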
 
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 02bb4e0..9aea88a 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -120,6 +120,7 @@
         "MediaMuxer.cpp",
         "NuCachedSource2.cpp",
         "NuMediaExtractor.cpp",
+        "OggWriter.cpp",
         "OMXClient.cpp",
         "OmxInfoBuilder.cpp",
         "RemoteMediaExtractor.cpp",
@@ -159,6 +160,7 @@
         "libstagefright_codecbase",
         "libstagefright_foundation",
         "libstagefright_omx_utils",
+        "libstagefright_opus_common",
         "libstagefright_xmlparser",
         "libRScpp",
         "libhidlallocatorutils",
@@ -179,6 +181,7 @@
         "libstagefright_webm",
         "libstagefright_timedtext",
         "libvpx",
+        "libogg",
         "libwebm",
         "libstagefright_esds",
         "libstagefright_id3",
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 1f8ccb3..81fc4ae 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "MediaExtractorFactory"
 #include <utils/Log.h>
 
+#include <android/dlext.h>
 #include <binder/IPCThreadState.h>
 #include <binder/PermissionCache.h>
 #include <binder/IServiceManager.h>
@@ -36,6 +37,23 @@
 #include <dirent.h>
 #include <dlfcn.h>
 
+// Copied from GraphicsEnv.cpp
+// TODO(b/37049319) Get this from a header once one exists
+extern "C" {
+  android_namespace_t* android_create_namespace(const char* name,
+                                                const char* ld_library_path,
+                                                const char* default_library_path,
+                                                uint64_t type,
+                                                const char* permitted_when_isolated_path,
+                                                android_namespace_t* parent);
+  bool android_link_namespaces(android_namespace_t* from,
+                               android_namespace_t* to,
+                               const char* shared_libs_sonames);
+  enum {
+     ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
+  };
+}
+
 namespace android {
 
 // static
@@ -145,6 +163,13 @@
 std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
 bool MediaExtractorFactory::gPluginsRegistered = false;
 bool MediaExtractorFactory::gIgnoreVersion = false;
+std::string MediaExtractorFactory::gLinkedLibraries;
+
+// static
+void MediaExtractorFactory::SetLinkedLibraries(const std::string& linkedLibraries) {
+    Mutex::Autolock autoLock(gPluginMutex);
+    gLinkedLibraries = linkedLibraries;
+}
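
A hypothetical initialization sketch for the new SetLinkedLibraries() hook; the soname list is illustrative and would normally come from the process that hosts the extractor namespace:

    #include <media/stagefright/MediaExtractorFactory.h>

    // Colon-separated list of libraries the isolated extractor namespace may link against.
    // The exact list here is an example, not taken from this change.
    void initExtractorNamespace() {
        android::MediaExtractorFactory::SetLinkedLibraries(
                "libc.so:libm.so:libdl.so:liblog.so:libmediandk.so");
    }
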
 
 // static
 void *MediaExtractorFactory::sniff(
@@ -328,6 +353,62 @@
     }
 }
 
+// static
+void MediaExtractorFactory::RegisterExtractorsInApex(
+        const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
+    ALOGV("search for plugins at %s", libDirPath);
+    ALOGV("linked libs %s", gLinkedLibraries.c_str());
+
+    android_namespace_t *extractorNs = android_create_namespace("extractor",
+            nullptr,  // ld_library_path
+            libDirPath,
+            ANDROID_NAMESPACE_TYPE_ISOLATED,
+            nullptr,  // permitted_when_isolated_path
+            nullptr); // parent
+    if (!android_link_namespaces(extractorNs, nullptr, gLinkedLibraries.c_str())) {
+        ALOGE("Failed to link namespace. Failed to load extractor plug-ins in apex.");
+        return;
+    }
+    const android_dlextinfo dlextinfo = {
+        .flags = ANDROID_DLEXT_USE_NAMESPACE,
+        .library_namespace = extractorNs,
+    };
+
+    DIR *libDir = opendir(libDirPath);
+    if (libDir) {
+        struct dirent* libEntry;
+        while ((libEntry = readdir(libDir))) {
+            if (libEntry->d_name[0] == '.') {
+                continue;
+            }
+            String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
+            if (!libPath.contains("extractor.so")) {
+                continue;
+            }
+            void *libHandle = android_dlopen_ext(
+                    libPath.string(),
+                    RTLD_NOW | RTLD_LOCAL, &dlextinfo);
+            if (libHandle) {
+                GetExtractorDef getDef =
+                    (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
+                if (getDef) {
+                    ALOGV("registering sniffer for %s", libPath.string());
+                    RegisterExtractor(
+                            new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
+                } else {
+                    ALOGW("%s does not contain sniffer", libPath.string());
+                    dlclose(libHandle);
+                }
+            } else {
+                ALOGW("couldn't dlopen(%s): %s", libPath.string(), dlerror());
+            }
+        }
+        closedir(libDir);
+    } else {
+        ALOGE("couldn't opendir(%s)", libDirPath);
+    }
+}
+
 static bool compareFunc(const sp<ExtractorPlugin>& first, const sp<ExtractorPlugin>& second) {
     return strcmp(first->def.extractor_name, second->def.extractor_name) < 0;
 }
@@ -346,6 +427,12 @@
 
     std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
 
+    RegisterExtractorsInApex("/apex/com.android.media/lib"
+#ifdef __LP64__
+            "64"
+#endif
+            , *newList);
+
     RegisterExtractorsInSystem("/system/lib"
 #ifdef __LP64__
             "64"
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 98f59b5..9ba2add 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -35,6 +35,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MPEG4Writer.h>
+#include <media/stagefright/OggWriter.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
@@ -52,6 +53,8 @@
         mWriter = new MPEG4Writer(fd);
     } else if (format == OUTPUT_FORMAT_WEBM) {
         mWriter = new WebmWriter(fd);
+    } else if (format == OUTPUT_FORMAT_OGG) {
+        mWriter = new OggWriter(fd);
     }
 
     if (mWriter != NULL) {
@@ -59,6 +62,8 @@
         if (format == OUTPUT_FORMAT_HEIF) {
             // Note that the key uses recorder file types.
             mFileMeta->setInt32(kKeyFileType, output_format::OUTPUT_FORMAT_HEIF);
+        } else if (format == OUTPUT_FORMAT_OGG) {
+            mFileMeta->setInt32(kKeyFileType, output_format::OUTPUT_FORMAT_OGG);
         }
         mState = INITIALIZED;
     }
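
A rough usage sketch of the new OUTPUT_FORMAT_OGG path; the MediaMuxer method signatures are assumed from libstagefright, and the file path, channel count, and sample rate are placeholders:

    #include <fcntl.h>
    #include <unistd.h>
    #include <media/stagefright/MediaDefs.h>
    #include <media/stagefright/MediaMuxer.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Writes a single Opus packet into an .ogg file via the new muxer path.
    void muxOneOpusPacket(const sp<ABuffer> &opusPacket, int64_t timeUs) {
        int fd = open("/data/local/tmp/out.ogg", O_CREAT | O_RDWR, 0644);
        sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_OGG);

        sp<AMessage> format = new AMessage;
        format->setString("mime", MEDIA_MIMETYPE_AUDIO_OPUS);
        format->setInt32("channel-count", 2);
        format->setInt32("sample-rate", 48000);
        ssize_t trackIndex = muxer->addTrack(format);  // negative on failure

        muxer->start();
        muxer->writeSampleData(opusPacket, trackIndex, timeUs, 0 /* flags */);
        muxer->stop();
        close(fd);
    }
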
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index a3259fd..dbc287e 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -308,6 +308,8 @@
 
 void parseVorbisComment(
         AMediaFormat *fileMeta, const char *comment, size_t commentLength) {
+    // The haptic tag is kept local here since it is only used by the extractor to
+    // generate the channel mask.
+    const char* const haptic = "haptic";
     struct {
         const char *const mTag;
         const char *mKey;
@@ -328,6 +330,7 @@
         { "LYRICIST", AMEDIAFORMAT_KEY_LYRICIST },
         { "METADATA_BLOCK_PICTURE", AMEDIAFORMAT_KEY_ALBUMART },
         { "ANDROID_LOOP", AMEDIAFORMAT_KEY_LOOP },
+        { "ANDROID_HAPTIC", haptic },
     };
 
         for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
@@ -343,6 +346,15 @@
                     if (!strcasecmp(&comment[tagLen + 1], "true")) {
                         AMediaFormat_setInt32(fileMeta, AMEDIAFORMAT_KEY_LOOP, 1);
                     }
+                } else if (kMap[j].mKey == haptic) {
+                    char *end;
+                    errno = 0;
+                    const int hapticChannelCount = strtol(&comment[tagLen + 1], &end, 10);
+                    if (errno == 0) {
+                        AMediaFormat_setInt32(fileMeta, haptic, hapticChannelCount);
+                    } else {
+                        ALOGE("Error(%d) when parsing haptic channel count", errno);
+                    }
                 } else {
                     AMediaFormat_setString(fileMeta, kMap[j].mKey, &comment[tagLen + 1]);
                 }
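
An illustrative sketch of how an ANDROID_HAPTIC comment flows through parseVorbisComment(); the MetaDataUtils.h include path and the NDK AMediaFormat calls are assumptions, and the value 2 is just an example:

    #include <cstring>
    #include <media/NdkMediaFormat.h>
    #include <media/stagefright/MetaDataUtils.h>

    void exampleHapticComment() {
        const char *comment = "ANDROID_HAPTIC=2";   // declares two haptic channels
        AMediaFormat *meta = AMediaFormat_new();
        android::parseVorbisComment(meta, comment, strlen(comment));

        int32_t hapticChannels = 0;
        AMediaFormat_getInt32(meta, "haptic", &hapticChannels);  // now 2
        AMediaFormat_delete(meta);
    }
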
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
new file mode 100644
index 0000000..ad55c56
--- /dev/null
+++ b/media/libstagefright/OggWriter.cpp
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OggWriter"
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <media/MediaSource.h>
+#include <media/mediarecorder.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OggWriter.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include "OpusHeader.h"
+
+extern "C" {
+#include <ogg/ogg.h>
+}
+
+// store the int32 value in little-endian order.
+static inline void writeint(char *buf, int base, int32_t val) {
+    buf[base + 3] = ((val) >> 24) & 0xff;
+    buf[base + 2] = ((val) >> 16) & 0xff;
+    buf[base + 1] = ((val) >> 8) & 0xff;
+    buf[base] = (val)&0xff;
+}
+
+// linkage between our header OggStreamState and the underlying ogg_stream_state
+// so that consumers of our interface do not require the ogg headers themselves.
+struct OggStreamState : public ogg_stream_state {};
+
+namespace android {
+
+OggWriter::OggWriter(int fd)
+      : mFd(dup(fd)),
+        mInitCheck(mFd < 0 ? NO_INIT : OK) {
+    // empty
+}
+
+OggWriter::~OggWriter() {
+    if (mStarted) {
+        reset();
+    }
+
+    if (mFd != -1) {
+        close(mFd);
+        mFd = -1;
+    }
+
+    free(mOs);
+}
+
+status_t OggWriter::initCheck() const {
+    return mInitCheck;
+}
+
+status_t OggWriter::addSource(const sp<MediaSource>& source) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mSource != NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    // Support is limited to single track of Opus audio.
+    const char* mime = nullptr;
+    if (!source->getFormat()->findCString(kKeyMIMEType, &mime)) {
+        return BAD_VALUE;
+    }
+    const char* opus = MEDIA_MIMETYPE_AUDIO_OPUS;
+    if (strncasecmp(mime, opus, strlen(opus))) {
+        ALOGE("Track (%s) other than %s is not supported", mime, opus);
+        return ERROR_UNSUPPORTED;
+    }
+
+    mOs = (OggStreamState*) malloc(sizeof(ogg_stream_state));
+    if (ogg_stream_init((ogg_stream_state*)mOs, rand()) == -1) {
+        ALOGE("ogg stream init failed");
+        return UNKNOWN_ERROR;
+    }
+
+    // Write Ogg headers.
+    int32_t nChannels = 0;
+    if (!source->getFormat()->findInt32(kKeyChannelCount, &nChannels)) {
+        ALOGE("Missing format keys for audio track");
+        source->getFormat()->dumpToLog();
+        return BAD_VALUE;
+    }
+    source->getFormat()->dumpToLog();
+
+    int32_t sampleRate = 0;
+    if (!source->getFormat()->findInt32(kKeySampleRate, &sampleRate)) {
+        ALOGE("Missing format key for sample rate");
+        source->getFormat()->dumpToLog();
+        return UNKNOWN_ERROR;
+    }
+
+    mSampleRate = sampleRate;
+
+    OpusHeader header;
+    header.channels = nChannels;
+    header.num_streams = nChannels;
+    header.num_coupled = 0;
+    header.channel_mapping = ((nChannels > 8) ? 255 : (nChannels > 2));
+    header.gain_db = 0;
+    header.skip_samples = 0;
+
+    // The header is 21 bytes plus mapping data driven by the channel count,
+    // so expect sizes in the low 30s here. WriteOpusHeader() will tell us
+    // if things are bad.
+    unsigned char header_data[100];
+    ogg_packet op;
+    ogg_page og;
+
+    const int packet_size = WriteOpusHeader(header, mSampleRate, (uint8_t*)header_data,
+                                            sizeof(header_data));
+
+    if (packet_size < 0) {
+        ALOGE("opus header writing failed");
+        return UNKNOWN_ERROR;
+    }
+    op.packet = header_data;
+    op.bytes = packet_size;
+    op.b_o_s = 1;
+    op.e_o_s = 0;
+    op.granulepos = 0;
+    op.packetno = 0;
+    ogg_stream_packetin((ogg_stream_state*)mOs, &op);
+
+    int ret;
+    while ((ret = ogg_stream_flush((ogg_stream_state*)mOs, &og)) != 0) {
+        write(mFd, og.header, og.header_len);
+        write(mFd, og.body, og.body_len);
+    }
+
+    const char* vendor_string = "libopus";
+    const int vendor_length = strlen(vendor_string);
+    int user_comment_list_length = 0;
+
+    const int comments_length = 8 + 4 + vendor_length + 4 + user_comment_list_length;
+    char* comments = (char*)malloc(comments_length);
+    if (comments == NULL) {
+        ALOGE("failed to allocate ogg comment buffer");
+        return UNKNOWN_ERROR;
+    }
+    memcpy(comments, "OpusTags", 8);
+    writeint(comments, 8, vendor_length);
+    memcpy(comments + 12, vendor_string, vendor_length);
+    writeint(comments, 12 + vendor_length, user_comment_list_length);
+
+    op.packet = (unsigned char*)comments;
+    op.bytes = comments_length;
+    op.b_o_s = 0;
+    op.e_o_s = 0;
+    op.granulepos = 0;
+    op.packetno = 1;
+    ogg_stream_packetin((ogg_stream_state*)mOs, &op);
+
+    while ((ret = ogg_stream_flush((ogg_stream_state*)mOs, &og)) != 0) {
+        write(mFd, og.header, og.header_len);
+        write(mFd, og.body, og.body_len);
+    }
+
+    mSource = source;
+    free(comments);
+    return OK;
+}
+
+status_t OggWriter::start(MetaData* /* params */) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mSource == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    if (mStarted && mPaused) {
+        mPaused = false;
+        mResumed = true;
+        return OK;
+    } else if (mStarted) {
+        // Already started; nothing to do.
+        return OK;
+    }
+
+    status_t err = mSource->start();
+
+    if (err != OK) {
+        return err;
+    }
+
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+    mReachedEOS = false;
+    mDone = false;
+
+    pthread_create(&mThread, &attr, ThreadWrapper, this);
+    pthread_attr_destroy(&attr);
+
+    mStarted = true;
+
+    return OK;
+}
+
+status_t OggWriter::pause() {
+    if (!mStarted) {
+        return OK;
+    }
+    mPaused = true;
+    return OK;
+}
+
+status_t OggWriter::reset() {
+    if (!mStarted) {
+        return OK;
+    }
+
+    mDone = true;
+
+    void* dummy;
+    pthread_join(mThread, &dummy);
+
+    status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+    {
+        status_t status = mSource->stop();
+        if (err == OK && (status != OK && status != ERROR_END_OF_STREAM)) {
+            err = status;
+        }
+    }
+
+    mStarted = false;
+    return err;
+}
+
+bool OggWriter::exceedsFileSizeLimit() {
+    if (mMaxFileSizeLimitBytes == 0) {
+        return false;
+    }
+    return mEstimatedSizeBytes > mMaxFileSizeLimitBytes;
+}
+
+bool OggWriter::exceedsFileDurationLimit() {
+    if (mMaxFileDurationLimitUs == 0) {
+        return false;
+    }
+    return mEstimatedDurationUs > mMaxFileDurationLimitUs;
+}
+
+// static
+void* OggWriter::ThreadWrapper(void* me) {
+    return (void*)(uintptr_t) static_cast<OggWriter*>(me)->threadFunc();
+}
+
+status_t OggWriter::threadFunc() {
+    mEstimatedDurationUs = 0;
+    mEstimatedSizeBytes = 0;
+    bool stoppedPrematurely = true;
+    int64_t previousPausedDurationUs = 0;
+    int64_t maxTimestampUs = 0;
+    status_t err = OK;
+
+    prctl(PR_SET_NAME, (unsigned long)"OggWriter", 0, 0, 0);
+
+    while (!mDone) {
+        MediaBufferBase* buffer = nullptr;
+        err = mSource->read(&buffer);
+
+        if (err != OK) {
+            ALOGW("failed to read next buffer");
+            break;
+        }
+
+        if (mPaused) {
+            buffer->release();
+            buffer = nullptr;
+            continue;
+        }
+        mEstimatedSizeBytes += buffer->range_length();
+        if (exceedsFileSizeLimit()) {
+            buffer->release();
+            buffer = nullptr;
+            notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+            ALOGW("estimated size(%" PRId64 ") exceeds limit (%" PRId64 ")",
+                  mEstimatedSizeBytes, mMaxFileSizeLimitBytes);
+            break;
+        }
+        int64_t timestampUs;
+        CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
+        if (timestampUs > mEstimatedDurationUs) {
+            mEstimatedDurationUs = timestampUs;
+        }
+        if (mResumed) {
+            previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+            mResumed = false;
+        }
+
+        timestampUs -= previousPausedDurationUs;
+
+        ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64, timestampUs,
+              previousPausedDurationUs);
+        if (timestampUs > maxTimestampUs) {
+            maxTimestampUs = timestampUs;
+        }
+
+        if (exceedsFileDurationLimit()) {
+            buffer->release();
+            buffer = nullptr;
+            notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+            ALOGW("estimated duration(%" PRId64 " us) exceeds limit(%" PRId64 " us)",
+                  mEstimatedDurationUs, mMaxFileDurationLimitUs);
+            break;
+        }
+
+        ogg_packet op;
+        ogg_page og;
+        op.packet = (uint8_t*)buffer->data() + buffer->range_offset();
+        op.bytes = (long)buffer->range_length();
+        op.b_o_s = 0;
+        op.e_o_s = mReachedEOS ? 1 : 0;
+        // granulepos is the total number of PCM audio samples @ 48 kHz, up to and
+        // including the current packet.
+        ogg_int64_t granulepos = (48000 * mEstimatedDurationUs) / 1000000;
+        op.granulepos = granulepos;
+
+        // Headers are at packets 0 and 1.
+        op.packetno = 2 + (ogg_int32_t)mCurrentPacketId++;
+        ogg_stream_packetin((ogg_stream_state*)mOs, &op);
+        size_t n = 0;
+
+        while (ogg_stream_flush((ogg_stream_state*)mOs, &og) > 0) {
+            write(mFd, og.header, og.header_len);
+            write(mFd, og.body, og.body_len);
+            n = n + og.header_len + og.body_len;
+        }
+
+        if (n < buffer->range_length()) {
+            buffer->release();
+            buffer = nullptr;
+            err = ERROR_IO;
+            break;
+        }
+
+        if (err != OK) {
+            break;
+        }
+
+        stoppedPrematurely = false;
+
+        buffer->release();
+        buffer = nullptr;
+    }
+
+    // end of stream is an ok thing
+    if (err == ERROR_END_OF_STREAM) {
+        err = OK;
+    }
+
+    if (err == OK && stoppedPrematurely) {
+        err = ERROR_MALFORMED;
+    }
+
+    close(mFd);
+    mFd = -1;
+    mReachedEOS = true;
+
+    return err;
+}
+
+bool OggWriter::reachedEOS() {
+    return mReachedEOS;
+}
+
+}  // namespace android
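
A rough usage sketch for the new writer, based on the MediaWriter interface declared in OggWriter.h further below; the Opus MediaSource is assumed to tag each buffer with kKeyTime:

    #include <media/MediaSource.h>
    #include <media/stagefright/OggWriter.h>
    #include <utils/Errors.h>

    using namespace android;

    // opusSource is assumed to deliver Opus packets, e.g. an encoder wrapped in a MediaSource.
    status_t recordOpusToFd(int fd, const sp<MediaSource> &opusSource) {
        sp<OggWriter> writer = new OggWriter(fd);   // the writer dup()s the fd
        status_t err = writer->initCheck();
        if (err != OK) {
            return err;
        }
        err = writer->addSource(opusSource);        // emits the OpusHead/OpusTags pages
        if (err != OK) {
            return err;
        }
        err = writer->start();                      // spawns the writer thread
        if (err != OK) {
            return err;
        }
        // ... let the source run for the desired duration ...
        return writer->stop();                      // joins the thread and returns its status
    }
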
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
index 26d7dff..b90649c 100644
--- a/media/libstagefright/StagefrightPluginLoader.cpp
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -34,9 +34,7 @@
 
 }  // unnamed namespace
 
-StagefrightPluginLoader::StagefrightPluginLoader(const char *libPath)
-    : mCreateCodec(nullptr),
-      mCreateBuilder(nullptr) {
+StagefrightPluginLoader::StagefrightPluginLoader(const char *libPath) {
     if (android::base::GetIntProperty("debug.media.codec2", 0) == 0) {
         ALOGD("CCodec is disabled.");
         return;
diff --git a/media/libstagefright/StagefrightPluginLoader.h b/media/libstagefright/StagefrightPluginLoader.h
index 999d30c..78effbf 100644
--- a/media/libstagefright/StagefrightPluginLoader.h
+++ b/media/libstagefright/StagefrightPluginLoader.h
@@ -40,10 +40,10 @@
     static Mutex sMutex;
     static std::unique_ptr<StagefrightPluginLoader> sInstance;
 
-    void *mLibHandle;
-    CodecBase::CreateCodecFunc mCreateCodec;
-    MediaCodecListBuilderBase::CreateBuilderFunc mCreateBuilder;
-    CodecBase::CreateInputSurfaceFunc mCreateInputSurface;
+    void *mLibHandle{nullptr};
+    CodecBase::CreateCodecFunc mCreateCodec{nullptr};
+    MediaCodecListBuilderBase::CreateBuilderFunc mCreateBuilder{nullptr};
+    CodecBase::CreateInputSurfaceFunc mCreateInputSurface{nullptr};
 };
 
 }  // namespace android
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index df929ae..670b607 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1822,6 +1822,7 @@
     { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
     { MEDIA_MIMETYPE_AUDIO_AC3,         AUDIO_FORMAT_AC3},
     { MEDIA_MIMETYPE_AUDIO_EAC3,        AUDIO_FORMAT_E_AC3},
+    { MEDIA_MIMETYPE_AUDIO_EAC3_JOC,    AUDIO_FORMAT_E_AC3_JOC},
     { MEDIA_MIMETYPE_AUDIO_AC4,         AUDIO_FORMAT_AC4},
     { MEDIA_MIMETYPE_AUDIO_FLAC,        AUDIO_FORMAT_FLAC},
     { MEDIA_MIMETYPE_AUDIO_ALAC,        AUDIO_FORMAT_ALAC },
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index a8adff5..fb51cc5 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -365,6 +365,8 @@
 // static
 AString AString::FromParcel(const Parcel &parcel) {
     size_t size = static_cast<size_t>(parcel.readInt32());
+    // The static analyzer reports a false positive here under C++17.
+    // https://bugs.llvm.org/show_bug.cgi?id=38176 . NOLINTNEXTLINE
     return AString(static_cast<const char *>(parcel.readInplace(size)), size);
 }
 
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index f93ae65..aba44bb 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
 const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
 const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC = "audio/eac3-joc";
 const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 523378e..8edddcc 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
 extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
 extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC;
 extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 73f93d1..80125d4 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -461,6 +461,8 @@
 
     status_t setupEAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
 
+    status_t setupAC4Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+
     status_t selectAudioPortFormat(
             OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
 
@@ -477,6 +479,7 @@
     status_t setPriority(int32_t priority);
     status_t setLatency(uint32_t latency);
     status_t getLatency(uint32_t *latency);
+    status_t setAudioPresentation(int32_t presentationId, int32_t programId);
     status_t setOperatingRate(float rateFloat, bool isVideo);
     status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
     status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index a462ae7..704bfdd 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -135,6 +135,8 @@
 constexpr int32_t VP9Profile3 = 0x08;
 constexpr int32_t VP9Profile2HDR = 0x1000;
 constexpr int32_t VP9Profile3HDR = 0x2000;
+constexpr int32_t VP9Profile2HDR10Plus = 0x4000;
+constexpr int32_t VP9Profile3HDR10Plus = 0x8000;
 
 constexpr int32_t VP9Level1  = 0x1;
 constexpr int32_t VP9Level11 = 0x2;
@@ -155,6 +157,7 @@
 constexpr int32_t HEVCProfileMain10      = 0x02;
 constexpr int32_t HEVCProfileMainStill   = 0x04;
 constexpr int32_t HEVCProfileMain10HDR10 = 0x1000;
+constexpr int32_t HEVCProfileMain10HDR10Plus = 0x2000;
 
 constexpr int32_t HEVCMainTierLevel1  = 0x1;
 constexpr int32_t HEVCHighTierLevel1  = 0x2;
@@ -343,6 +346,7 @@
 constexpr char KEY_GRID_COLUMNS[] = "grid-cols";
 constexpr char KEY_GRID_ROWS[] = "grid-rows";
 constexpr char KEY_HDR_STATIC_INFO[] = "hdr-static-info";
+constexpr char KEY_HDR10_PLUS_INFO[] = "hdr10-plus-info";
 constexpr char KEY_HEIGHT[] = "height";
 constexpr char KEY_I_FRAME_INTERVAL[] = "i-frame-interval";
 constexpr char KEY_INTRA_REFRESH_PERIOD[] = "intra-refresh-period";
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index ef9f7ed..84e01f3 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -35,17 +35,21 @@
             const sp<DataSource> &source, const char *mime = NULL);
     static void LoadPlugins(const ::std::string& apkPath);
     static status_t dump(int fd, const Vector<String16>& args);
+    static void SetLinkedLibraries(const std::string& linkedLibraries);
 
 private:
     static Mutex gPluginMutex;
     static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
     static bool gPluginsRegistered;
     static bool gIgnoreVersion;
+    static std::string gLinkedLibraries;
 
     static void RegisterExtractorsInApk(
             const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractorsInSystem(
             const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
+    static void RegisterExtractorsInApex(
+            const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractor(
             const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
 
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index 66f4d72..69d6cde 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -49,6 +49,7 @@
         OUTPUT_FORMAT_WEBM        = 1,
         OUTPUT_FORMAT_THREE_GPP   = 2,
         OUTPUT_FORMAT_HEIF        = 3,
+        OUTPUT_FORMAT_OGG         = 4,
         OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
     };
 
diff --git a/media/libstagefright/include/media/stagefright/OggWriter.h b/media/libstagefright/include/media/stagefright/OggWriter.h
new file mode 100644
index 0000000..e3837cd
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/OggWriter.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OGG_WRITER_H_
+
+#define OGG_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+
+struct OggStreamState;
+
+namespace android {
+
+struct OggWriter : public MediaWriter {
+    OggWriter(int fd);
+
+    status_t initCheck() const;
+
+    virtual status_t addSource(const sp<MediaSource>& source);
+    virtual bool reachedEOS();
+    virtual status_t start(MetaData* params = NULL);
+    virtual status_t stop() { return reset(); }
+    virtual status_t pause();
+
+protected:
+    ~OggWriter();
+
+private:
+    int mFd;
+    status_t mInitCheck;
+    sp<MediaSource> mSource;
+    bool mStarted = false;
+    volatile bool mPaused = false;
+    volatile bool mResumed = false;
+    volatile bool mDone;
+    volatile bool mReachedEOS;
+    pthread_t mThread;
+    int64_t mSampleRate;
+    int64_t mEstimatedSizeBytes;
+    int64_t mEstimatedDurationUs;
+
+    static void* ThreadWrapper(void*);
+    status_t threadFunc();
+    bool exceedsFileSizeLimit();
+    bool exceedsFileDurationLimit();
+    status_t reset();
+
+    int32_t mCurrentPacketId = 0;
+    OggStreamState* mOs = nullptr;
+
+    OggWriter(const OggWriter&);
+    OggWriter& operator=(const OggWriter&);
+};
+
+}  // namespace android
+
+#endif  // OGG_WRITER_H_
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index c499c77..b187035 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -164,6 +164,8 @@
             "audio_decoder.ac3", "audio_encoder.ac3" },
         { MEDIA_MIMETYPE_AUDIO_EAC3,
             "audio_decoder.eac3", "audio_encoder.eac3" },
+        { MEDIA_MIMETYPE_AUDIO_EAC3_JOC,
+            "audio_decoder.eac3_joc", "audio_encoder.eac3_joc" },
         { MEDIA_MIMETYPE_AUDIO_AC4,
             "audio_decoder.ac4", "audio_encoder.ac4" },
         { MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
diff --git a/media/libstagefright/opus/Android.bp b/media/libstagefright/opus/Android.bp
new file mode 100644
index 0000000..c5086ec
--- /dev/null
+++ b/media/libstagefright/opus/Android.bp
@@ -0,0 +1,21 @@
+cc_library_shared {
+    name: "libstagefright_opus_common",
+    vendor_available: true,
+
+    export_include_dirs: ["include"],
+
+    srcs: ["OpusHeader.cpp"],
+
+    shared_libs: ["liblog"],
+
+    cflags: ["-Werror"],
+
+    sanitize: {
+        integer_overflow: true,
+        cfi: true,
+        diag: {
+            integer_overflow: true,
+            cfi: true,
+        },
+    },
+}
\ No newline at end of file
diff --git a/media/libstagefright/opus/OpusHeader.cpp b/media/libstagefright/opus/OpusHeader.cpp
new file mode 100644
index 0000000..e4a460c
--- /dev/null
+++ b/media/libstagefright/opus/OpusHeader.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftOpus"
+
+#include <cstring>
+#include <stdint.h>
+
+#include <log/log.h>
+
+#include "OpusHeader.h"
+
+namespace android {
+
+// Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies
+// mappings for up to 8 channels. This information is part of the Vorbis I
+// Specification:
+// http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
+constexpr int kMaxChannels = 8;
+
+constexpr uint8_t kOpusChannelMap[kMaxChannels][kMaxChannels] = {
+        {0},
+        {0, 1},
+        {0, 2, 1},
+        {0, 1, 2, 3},
+        {0, 4, 1, 2, 3},
+        {0, 4, 1, 2, 3, 5},
+        {0, 4, 1, 2, 3, 5, 6},
+        {0, 6, 1, 2, 3, 4, 5, 7},
+};
+
+// Opus always has a 48kHz output rate. This is true for all Opus, not just this
+// implementation.
+constexpr int kRate = 48000;
+// Size of the Opus header excluding optional mapping information.
+constexpr size_t kOpusHeaderSize = 19;
+// Offset to magic string that starts Opus header.
+constexpr size_t kOpusHeaderLabelOffset = 0;
+// Offset to Opus version in the Opus header.
+constexpr size_t kOpusHeaderVersionOffset = 8;
+// Offset to the channel count byte in the Opus header.
+constexpr size_t kOpusHeaderChannelsOffset = 9;
+// Offset to the pre-skip value in the Opus header.
+constexpr size_t kOpusHeaderSkipSamplesOffset = 10;
+// Offset to sample rate in the Opus header.
+constexpr size_t kOpusHeaderSampleRateOffset = 12;
+// Offset to the gain value in the Opus header.
+constexpr size_t kOpusHeaderGainOffset = 16;
+// Offset to the channel mapping byte in the Opus header.
+constexpr size_t kOpusHeaderChannelMappingOffset = 18;
+// Opus Header contains a stream map. The mapping values are in the header
+// beyond the always present |kOpusHeaderSize| bytes of data. The mapping
+// data contains stream count, coupling information, and per channel mapping
+// values:
+//   - Byte 0: Number of streams.
+//   - Byte 1: Number coupled.
+//   - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping
+//             values.
+// Offset to the number of streams in the Opus header.
+constexpr size_t kOpusHeaderNumStreamsOffset = 19;
+// Offset to the number of streams that are coupled in the Opus header.
+constexpr size_t kOpusHeaderNumCoupledStreamsOffset = 20;
+// Offset to the stream to channel mapping in the Opus header.
+constexpr size_t kOpusHeaderStreamMapOffset = 21;
+// Maximum packet size used in Xiph's opusdec.
+constexpr int kMaxOpusOutputPacketSizeSamples = 960 * 6;
+
+// Default audio output channel layout. Used to initialize |stream_map| in
+// OpusHeader, and passed to opus_multistream_decoder_create() when the header
+// does not contain mapping information. The values are valid only for mono and
+// stereo output: Opus streams with more than 2 channels require a stream map.
+constexpr int kMaxChannelsWithDefaultLayout = 2;
+constexpr uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = {0, 1};
+
+static uint16_t ReadLE16(const uint8_t* data, size_t data_size, uint32_t read_offset) {
+    // check whether the 2nd byte is within the buffer
+    if (read_offset + 1 >= data_size) return 0;
+    uint16_t val;
+    val = data[read_offset];
+    val |= data[read_offset + 1] << 8;
+    return val;
+}
+
+// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
+bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header) {
+    if (data_size < kOpusHeaderSize) {
+        ALOGV("Header size is too small.");
+        return false;
+    }
+    header->channels = data[kOpusHeaderChannelsOffset];
+
+    if (header->channels < 1 || header->channels > kMaxChannels) {
+        ALOGV("Invalid Header, bad channel count: %d", header->channels);
+        return false;
+    }
+    header->skip_samples = ReadLE16(data, data_size, kOpusHeaderSkipSamplesOffset);
+    header->gain_db = static_cast<int16_t>(ReadLE16(data, data_size, kOpusHeaderGainOffset));
+    header->channel_mapping = data[kOpusHeaderChannelMappingOffset];
+    if (!header->channel_mapping) {
+        if (header->channels > kMaxChannelsWithDefaultLayout) {
+            ALOGV("Invalid Header, missing stream map.");
+            return false;
+        }
+        header->num_streams = 1;
+        header->num_coupled = header->channels > 1;
+        header->stream_map[0] = 0;
+        header->stream_map[1] = 1;
+        return true;
+    }
+    if (data_size < kOpusHeaderStreamMapOffset + header->channels) {
+        ALOGV("Invalid stream map; insufficient data for current channel "
+              "count: %d",
+              header->channels);
+        return false;
+    }
+    header->num_streams = data[kOpusHeaderNumStreamsOffset];
+    header->num_coupled = data[kOpusHeaderNumCoupledStreamsOffset];
+    if (header->num_streams + header->num_coupled != header->channels) {
+        ALOGV("Inconsistent channel mapping.");
+        return false;
+    }
+    for (int i = 0; i < header->channels; ++i)
+        header->stream_map[i] = data[kOpusHeaderStreamMapOffset + i];
+    return true;
+}
+
+int WriteOpusHeader(const OpusHeader &header, int input_sample_rate,
+                    uint8_t* output, size_t output_size) {
+    // See https://wiki.xiph.org/OggOpus#ID_Header.
+    const size_t total_size = kOpusHeaderStreamMapOffset + header.channels;
+    if (output_size < total_size) {
+        ALOGE("Output buffer too small for header.");
+        return -1;
+    }
+
+    // ensure entire header is cleared, even though we overwrite much of it below
+    memset(output, 0, output_size);
+
+    // Set magic signature.
+    memcpy(output + kOpusHeaderLabelOffset, "OpusHead", 8);
+    // Set Opus version.
+    output[kOpusHeaderVersionOffset] = 1;
+    // Set channel count.
+    output[kOpusHeaderChannelsOffset] = (uint8_t)header.channels;
+    // Set pre-skip
+    memcpy(output + kOpusHeaderSkipSamplesOffset, &header.skip_samples, sizeof(uint16_t));
+    // Set original input sample rate in Hz.
+    memcpy(output + kOpusHeaderSampleRateOffset, &input_sample_rate, sizeof(uint32_t));
+    // Set output gain in dB.
+    memcpy(output + kOpusHeaderGainOffset, &header.gain_db, sizeof(uint16_t));
+
+    if (header.channels > 2) {
+        // Set channel mapping
+        output[kOpusHeaderChannelMappingOffset] = 1;
+        // Assuming no coupled streams. This should actually be
+        // channels() - |coupled_streams|.
+        output[kOpusHeaderNumStreamsOffset] = header.channels;
+        output[kOpusHeaderNumCoupledStreamsOffset] = 0;
+
+        // Set the actual stream map.
+        for (int i = 0; i < header.channels; ++i) {
+            output[kOpusHeaderStreamMapOffset + i] = kOpusChannelMap[header.channels - 1][i];
+        }
+        return kOpusHeaderStreamMapOffset + header.channels + 1;
+    } else {
+        output[kOpusHeaderChannelMappingOffset] = 0;
+        return kOpusHeaderChannelMappingOffset + 1;
+    }
+}
+
+}  // namespace android
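
A minimal round-trip sketch exercising WriteOpusHeader() and ParseOpusHeader() with made-up stereo values; it assumes a little-endian host, matching the LE16 helpers above:

    #include <cstdint>
    #include <cstdio>
    #include <OpusHeader.h>

    using namespace android;

    int main() {
        // Arbitrary example values for a plain stereo stream (no explicit stream map).
        OpusHeader in = {};
        in.channels = 2;
        in.num_streams = 1;
        in.num_coupled = 1;
        in.channel_mapping = 0;
        in.gain_db = 0;
        in.skip_samples = 312;

        uint8_t blob[100];
        int size = WriteOpusHeader(in, 48000 /* input_sample_rate */, blob, sizeof(blob));
        if (size < 0) {
            return 1;
        }

        OpusHeader out = {};
        if (!ParseOpusHeader(blob, (size_t)size, &out)) {
            return 1;
        }
        printf("channels=%d skip_samples=%d gain_db=%d\n",
               out.channels, out.skip_samples, out.gain_db);  // 2, 312, 0
        return 0;
    }
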
diff --git a/media/libstagefright/opus/include/OpusHeader.h b/media/libstagefright/opus/include/OpusHeader.h
new file mode 100644
index 0000000..f9f79cd
--- /dev/null
+++ b/media/libstagefright/opus/include/OpusHeader.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The Opus specification is part of IETF RFC 6716:
+ * http://tools.ietf.org/html/rfc6716
+ */
+
+#ifndef OPUS_HEADER_H_
+#define OPUS_HEADER_H_
+
+namespace android {
+
+struct OpusHeader {
+    int channels;
+    int channel_mapping;
+    int num_streams;
+    int num_coupled;
+    int16_t gain_db;
+    int skip_samples;
+    uint8_t stream_map[8];
+};
+
+bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
+int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
+}  // namespace android
+
+#endif  // OPUS_HEADER_H_
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 64ecc2d..1f840b7 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -28,6 +28,7 @@
 
     shared_libs: [
         "libstagefright_foundation",
+        "libstagefright_opus_common",
         "libutils",
         "liblog",
     ],
diff --git a/media/libstagefright/webm/WebmElement.cpp b/media/libstagefright/webm/WebmElement.cpp
index a5120b9..4d504e0 100644
--- a/media/libstagefright/webm/WebmElement.cpp
+++ b/media/libstagefright/webm/WebmElement.cpp
@@ -305,6 +305,7 @@
 }
 
 sp<WebmElement> WebmElement::AudioTrackEntry(
+        const char *codec,
         int chans,
         double rate,
         const sp<ABuffer> &buf,
@@ -322,7 +323,7 @@
             uid,
             lacing,
             lang,
-            "A_VORBIS",
+            codec,
             kAudioType,
             trackEntryFields);
 
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/WebmElement.h
index ffbba1b..a94c23f 100644
--- a/media/libstagefright/webm/WebmElement.h
+++ b/media/libstagefright/webm/WebmElement.h
@@ -50,6 +50,7 @@
     static sp<WebmElement> SegmentInfo(uint64_t scale = 1000000, double dur = 0);
 
     static sp<WebmElement> AudioTrackEntry(
+            const char *codec,
             int chans,
             double rate,
             const sp<ABuffer> &buf,
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 4d73eb8..7b4b23a 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -23,6 +23,8 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <OpusHeader.h>
 
 #include <utils/Errors.h>
 
@@ -112,46 +114,102 @@
 // static
 sp<WebmElement> WebmWriter::audioTrack(const sp<MetaData>& md) {
     int32_t nChannels, samplerate;
-    uint32_t type;
-    const void *headerData1;
-    const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
-            'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
-    const void *headerData3;
-    size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
+    const char* mimeType;
 
     if (!md->findInt32(kKeyChannelCount, &nChannels)
-            || !md->findInt32(kKeySampleRate, &samplerate)
-            || !md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
-            || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+        || !md->findInt32(kKeySampleRate, &samplerate)
+        || !md->findCString(kKeyMIMEType, &mimeType)) {
         ALOGE("Missing format keys for audio track");
         md->dumpToLog();
         return NULL;
     }
 
-    size_t codecPrivateSize = 1;
-    codecPrivateSize += XiphLaceCodeLen(headerSize1);
-    codecPrivateSize += XiphLaceCodeLen(headerSize2);
-    codecPrivateSize += headerSize1 + headerSize2 + headerSize3;
+    if (!strncasecmp(mimeType, MEDIA_MIMETYPE_AUDIO_OPUS, strlen(MEDIA_MIMETYPE_AUDIO_OPUS))) {
+        // Opus in WebM is a well-known, yet under-documented, format. The codec private data
+        // of the track is an Opus Ogg header (https://tools.ietf.org/html/rfc7845#section-5.1).
+        // The track's codec name isn't standardized; its value should be "A_OPUS".
+        OpusHeader header;
+        header.channels = nChannels;
+        header.num_streams = nChannels;
+        header.num_coupled = 0;
+        // - Channel mapping family (8 bits unsigned)
+        //  --  0 = one stream: mono or L,R stereo
+        //  --  1 = channels in vorbis spec order: mono or L,R stereo or ... or FL,C,FR,RL,RR,LFE, ...
+        //  --  2..254 = reserved (treat as 255)
+        //  --  255 = no defined channel meaning
+        //
+        //  Our implementation encodes 0, 1, or 255.
+        header.channel_mapping = ((nChannels > 8) ? 255 : (nChannels > 2));
+        header.gain_db = 0;
+        header.skip_samples = 0;
 
-    off_t off = 0;
-    sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
-    uint8_t *codecPrivateData = codecPrivateBuf->data();
-    codecPrivateData[off++] = 2;
+        // Headers are 21 bytes plus an amount driven by the channel count;
+        // expect sizes in the low 30s here. WriteOpusHeader() returns a
+        // negative value if it cannot produce a valid header.
+        unsigned char header_data[100];
+        int headerSize = WriteOpusHeader(header, samplerate, (uint8_t*)header_data,
+                                            sizeof(header_data));
 
-    off += XiphLaceEnc(codecPrivateData + off, headerSize1);
-    off += XiphLaceEnc(codecPrivateData + off, headerSize2);
+        if (headerSize < 0) {
+            // WriteOpusHeader() could not produce a valid header.
+            ALOGE("failed to generate OPUS header");
+            return NULL;
+        }
 
-    memcpy(codecPrivateData + off, headerData1, headerSize1);
-    off += headerSize1;
-    memcpy(codecPrivateData + off, headerData2, headerSize2);
-    off += headerSize2;
-    memcpy(codecPrivateData + off, headerData3, headerSize3);
+        size_t codecPrivateSize = 0;
+        codecPrivateSize += headerSize;
 
-    sp<WebmElement> entry = WebmElement::AudioTrackEntry(
-            nChannels,
-            samplerate,
-            codecPrivateBuf);
-    return entry;
+        off_t off = 0;
+        sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
+        uint8_t* codecPrivateData = codecPrivateBuf->data();
+
+        memcpy(codecPrivateData + off, (uint8_t*)header_data, headerSize);
+        sp<WebmElement> entry =
+                WebmElement::AudioTrackEntry("A_OPUS", nChannels, samplerate, codecPrivateBuf);
+        return entry;
+    } else if (!strncasecmp(mimeType,
+                            MEDIA_MIMETYPE_AUDIO_VORBIS,
+                            strlen(MEDIA_MIMETYPE_AUDIO_VORBIS))) {
+        uint32_t type;
+        const void *headerData1;
+        const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
+                'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
+        const void *headerData3;
+        size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
+
+        if (!md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
+            || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+            ALOGE("Missing header format keys for vorbis track");
+            md->dumpToLog();
+            return NULL;
+        }
+
+        size_t codecPrivateSize = 1;
+        codecPrivateSize += XiphLaceCodeLen(headerSize1);
+        codecPrivateSize += XiphLaceCodeLen(headerSize2);
+        codecPrivateSize += headerSize1 + headerSize2 + headerSize3;
+
+        off_t off = 0;
+        sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
+        uint8_t *codecPrivateData = codecPrivateBuf->data();
+        codecPrivateData[off++] = 2;
+
+        off += XiphLaceEnc(codecPrivateData + off, headerSize1);
+        off += XiphLaceEnc(codecPrivateData + off, headerSize2);
+
+        memcpy(codecPrivateData + off, headerData1, headerSize1);
+        off += headerSize1;
+        memcpy(codecPrivateData + off, headerData2, headerSize2);
+        off += headerSize2;
+        memcpy(codecPrivateData + off, headerData3, headerSize3);
+
+        sp<WebmElement> entry =
+                WebmElement::AudioTrackEntry("A_VORBIS", nChannels, samplerate, codecPrivateBuf);
+        return entry;
+    } else {
+        ALOGE("Track (%s) is not a supported audio format", mimeType);
+        return NULL;
+    }
 }
 
 size_t WebmWriter::numTracks() {
@@ -382,16 +440,18 @@
     const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
     const char *vp9 = MEDIA_MIMETYPE_VIDEO_VP9;
     const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;
+    const char* opus = MEDIA_MIMETYPE_AUDIO_OPUS;
 
     size_t streamIndex;
     if (!strncasecmp(mime, vp8, strlen(vp8)) ||
         !strncasecmp(mime, vp9, strlen(vp9))) {
         streamIndex = kVideoIndex;
-    } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
+    } else if (!strncasecmp(mime, vorbis, strlen(vorbis)) ||
+               !strncasecmp(mime, opus, strlen(opus))) {
         streamIndex = kAudioIndex;
     } else {
-        ALOGE("Track (%s) other than %s, %s or %s is not supported",
-              mime, vp8, vp9, vorbis);
+        ALOGE("Track (%s) other than %s, %s, %s, or %s is not supported",
+              mime, vp8, vp9, vorbis, opus);
         return ERROR_UNSUPPORTED;
     }
 
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index e355183..6976950 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -61,7 +61,6 @@
     ],
 
     cflags: [
-        "-fvisibility=hidden",
         "-DEXPORT=__attribute__((visibility(\"default\")))",
         "-Werror",
         "-Wall",
@@ -105,6 +104,10 @@
         },
     },
     version_script: "libmediandk.map.txt",
+    stubs: {
+        symbol_file: "libmediandk.map.txt",
+        versions: ["29"],
+    },
 }
 
 llndk_library {
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index bc140bf..e0af80d 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -273,6 +273,9 @@
 EXPORT const char* AMEDIAFORMAT_KEY_ALBUMARTIST = "albumartist";
 EXPORT const char* AMEDIAFORMAT_KEY_ARTIST = "artist";
 EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO = "audio-presentation-info";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PRESENTATION_ID =
+        "audio-presentation-presentation-id";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PROGRAM_ID = "audio-presentation-program-id";
 EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
 EXPORT const char* AMEDIAFORMAT_KEY_AUTHOR = "author";
 EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
@@ -320,6 +323,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
 EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
 EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO = "hdr10-plus-info";
 EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
 EXPORT const char* AMEDIAFORMAT_KEY_ICC_PROFILE = "icc-profile";
 EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index 9dc120d..b3ee853 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -241,12 +241,6 @@
 AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
- * Get format of the buffer. The specified buffer index must have been previously obtained from
- * dequeueOutputBuffer.
- */
-AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(21);
-
-/**
  * If you are done with a buffer, use this call to return the buffer to
  * the codec. If you previously specified a surface when configuring this
  * video decoder you can optionally render the buffer.
@@ -353,6 +347,12 @@
 #if __ANDROID_API__ >= 28
 
 /**
+ * Get format of the buffer. The specified buffer index must have been previously obtained from
+ * dequeueOutputBuffer.
+ */
+AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(28);
+
+/**
  * Get the component name. If the codec was created by createDecoderByType
  * or createEncoderByType, what component is chosen is not known beforehand.
  * Caller shall call AMediaCodec_releaseName to free the returned pointer.
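Because AMediaCodec_getBufferFormat() now carries __INTRODUCED_IN(28), callers that still build for older API levels should guard the call. A minimal sketch (logBufferFormat is a hypothetical helper):

#include <cstddef>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

void logBufferFormat(AMediaCodec* codec, size_t index) {
#if __ANDROID_API__ >= 28
    AMediaFormat* fmt = AMediaCodec_getBufferFormat(codec, index);
    if (fmt != nullptr) {
        // Inspect per-buffer keys here, then release the format object.
        AMediaFormat_delete(fmt);
    }
#else
    (void)codec; (void)index;  // not available before API 28
#endif
}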
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 13d9135..2cd1d04 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -181,6 +181,8 @@
 extern const char* AMEDIAFORMAT_KEY_ALBUMARTIST __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_ARTIST __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PRESENTATION_ID __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_PROGRAM_ID __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_AUTHOR __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_BITS_PER_SAMPLE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_CDTRACKNUMBER __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 88736ab..3567899 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -158,7 +158,7 @@
     AMediaCodec_dequeueInputBuffer;
     AMediaCodec_dequeueOutputBuffer;
     AMediaCodec_flush;
-    AMediaCodec_getBufferFormat; # introduced=21
+    AMediaCodec_getBufferFormat; # introduced=28
     AMediaCodec_getInputBuffer;
     AMediaCodec_getInputFormat; # introduced=28
     AMediaCodec_getName; # introduced=28
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1b20693..6c698f6 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -365,16 +365,17 @@
     static inline bool isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) {
         switch (audio_channel_mask_get_representation(channelMask)) {
         case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
-            uint32_t channelCount = FCC_2; // stereo is default
-            if (kEnableExtendedChannels) {
-                channelCount = audio_channel_count_from_out_mask(channelMask);
-                if (channelCount < FCC_2 // mono is not supported at this time
-                        || channelCount > AudioMixer::MAX_NUM_CHANNELS) {
-                    return false;
-                }
+            // A haptic channel mask is only applicable to a channel position mask.
+            const uint32_t channelCount = audio_channel_count_from_out_mask(
+                    channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
+            const uint32_t maxChannelCount = kEnableExtendedChannels
+                    ? AudioMixer::MAX_NUM_CHANNELS : FCC_2;
+            if (channelCount < FCC_2 // mono is not supported at this time
+                    || channelCount > maxChannelCount) {
+                return false;
             }
             // check that channelMask is the "canonical" one we expect for the channelCount.
-            return channelMask == audio_channel_out_mask_from_count(channelCount);
+            return audio_channel_position_mask_is_out_canonical(channelMask);
             }
         case AUDIO_CHANNEL_REPRESENTATION_INDEX:
             if (kEnableExtendedChannels) {
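A quick illustration of the reworked check, with hypothetical bit values standing in for the real AUDIO_CHANNEL_* constants: the haptic bits are stripped before the channel count is computed, so a stereo mask that also carries haptic channels still validates as a two-channel PCM sink.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kFrontLeft  = 0x1;         // stand-in for AUDIO_CHANNEL_OUT_FRONT_LEFT
constexpr uint32_t kFrontRight = 0x2;         // stand-in for AUDIO_CHANNEL_OUT_FRONT_RIGHT
constexpr uint32_t kHapticAll  = 0x30000000;  // stand-in for AUDIO_CHANNEL_HAPTIC_ALL

int main() {
    uint32_t mask = kFrontLeft | kFrontRight | kHapticAll;
    int audioChannels = __builtin_popcount(mask & ~kHapticAll);
    printf("channels counted for PCM sink validation: %d\n", audioChannels);  // prints 2
    return 0;
}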
diff --git a/services/audioflinger/BufLog.cpp b/services/audioflinger/BufLog.cpp
index ae96036..5f6aca0 100644
--- a/services/audioflinger/BufLog.cpp
+++ b/services/audioflinger/BufLog.cpp
@@ -115,7 +115,7 @@
         unsigned int samplingRate,
         size_t maxBytes = 0) : mId(id), mFormat(format), mChannels(channels),
                 mSamplingRate(samplingRate), mMaxBytes(maxBytes) {
-    mByteCount = 0l;
+    mByteCount = 0;
     mPaused = false;
     if (tag != NULL) {
         (void)audio_utils_strlcpy(mTag, tag);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 27aec9e..3dae1e9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7367,8 +7367,7 @@
         status_t status = NO_ERROR;
         if (recordTrack->isExternalTrack()) {
             mLock.unlock();
-            bool silenced;
-            status = AudioSystem::startInput(recordTrack->portId(), &silenced);
+            status = AudioSystem::startInput(recordTrack->portId());
             mLock.lock();
             if (recordTrack->isInvalid()) {
                 recordTrack->clearSyncStartEvent();
@@ -7396,7 +7395,6 @@
                 recordTrack->clearSyncStartEvent();
                 return status;
             }
-            recordTrack->setSilenced(silenced);
         }
         // Catch up with current buffer indices if thread is already running.
         // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
@@ -8346,11 +8344,10 @@
         return BAD_VALUE;
     }
 
-    bool silenced = false;
     if (isOutput()) {
         ret = AudioSystem::startOutput(portId);
     } else {
-        ret = AudioSystem::startInput(portId, &silenced);
+        ret = AudioSystem::startInput(portId);
     }
 
     Mutex::Autolock _l(mLock);
@@ -8371,21 +8368,21 @@
         return PERMISSION_DENIED;
     }
 
-    if (isOutput()) {
-        // force volume update when a new track is added
-        mHalVolFloat = -1.0f;
-    } else if (!silenced) {
-        for (const sp<MmapTrack> &track : mActiveTracks) {
-            if (track->isSilenced_l() && track->uid() != client.clientUid)
-                track->invalidate();
-        }
-    }
-
     // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
     sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
                                         isOutput(), client.clientUid, client.clientPid, portId);
 
-    track->setSilenced_l(silenced);
+    if (isOutput()) {
+        // force volume update when a new track is added
+        mHalVolFloat = -1.0f;
+    } else if (!track->isSilenced_l()) {
+        for (const sp<MmapTrack> &t : mActiveTracks) {
+            if (t->isSilenced_l() && t->uid() != client.clientUid)
+                t->invalidate();
+        }
+    }
+
     mActiveTracks.add(track);
     sp<EffectChain> chain = getEffectChain_l(mSessionId);
     if (chain != 0) {
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 3c3a82b..ea6389c 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -65,20 +65,6 @@
         API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
     } input_type_t;
 
-    enum {
-        API_INPUT_CONCURRENCY_NONE = 0,
-        API_INPUT_CONCURRENCY_CALL = (1 << 0),      // Concurrency with a call
-        API_INPUT_CONCURRENCY_CAPTURE = (1 << 1),   // Concurrency with another capture
-        API_INPUT_CONCURRENCY_HOTWORD = (1 << 2),   // Concurrency with a hotword
-        API_INPUT_CONCURRENCY_PREEMPT = (1 << 3),   // pre-empted someone
-                // NB: preempt is marked on a successful return, others are on failing calls
-        API_INPUT_CONCURRENCY_LAST = (1 << 4),
-
-        API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_LAST - 1),
-    };
-
-    typedef uint32_t concurrency_type__mask_t;
-
 public:
     virtual ~AudioPolicyInterface() {}
     //
@@ -141,9 +127,7 @@
                                      input_type_t *inputType,
                                      audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the input starts being used.
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool silenced,
-                                concurrency_type__mask_t *concurrency) = 0;
+    virtual status_t startInput(audio_port_handle_t portId) = 0;
     // indicates to the audio policy manager that the input stops being used.
     virtual status_t stopInput(audio_port_handle_t portId) = 0;
     // releases the input.
@@ -197,6 +181,8 @@
     virtual status_t    dump(int fd) = 0;
 
     virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo) = 0;
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes) = 0;
 
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
@@ -242,6 +228,8 @@
                                         bool reported) = 0;
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
 
+    virtual bool     isHapticPlaybackSupported() = 0;
+
     virtual void     setAppState(uid_t uid, app_state_t state);
 };
 
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 9bd68e1..30b0044 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -32,14 +32,6 @@
 #define MAX_MIXER_CHANNEL_COUNT FCC_8
 
 /**
- * A device mask for all audio input devices that are considered "virtual" when evaluating
- * active inputs in getActiveInputs()
- */
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|\
-        AUDIO_DEVICE_IN_BUS|AUDIO_DEVICE_IN_FM_TUNER)
-
-
-/**
  * A device mask for all audio input and output devices where matching inputs/outputs on device
  * type alone is not enough: the address must match too
  */
@@ -68,23 +60,6 @@
 }
 
 /**
- * Check if the input device given is considered as a virtual device.
- *
- * @param[in] device to consider
- *
- * @return true if the device is a virtual one, false otherwise.
- */
-static inline bool is_virtual_input_device(audio_devices_t device)
-{
-    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
-        device &= ~AUDIO_DEVICE_BIT_IN;
-        if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
-            return true;
-    }
-    return false;
-}
-
-/**
  * Check whether the device type is one
  * where addresses are used to distinguish between one connected device and another
  *
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 6e4c044..9f8b8c0 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -58,9 +58,8 @@
     void clearPreemptedSessions();
     bool isActive() const { return mGlobalActiveCount > 0; }
     bool isSourceActive(audio_source_t source) const;
-    audio_source_t inputSource(bool activeOnly = false) const;
+    audio_source_t source() const;
     bool isSoundTrigger() const;
-    audio_source_t getHighestPrioritySource(bool activeOnly) const;
     void setClientActive(const sp<RecordClientDescriptor>& client, bool active);
     int32_t activeCount() { return mGlobalActiveCount; }
 
@@ -121,7 +120,7 @@
      * Only considers inputs from physical devices (e.g. main mic, headset mic) when
      * ignoreVirtualInputs is true.
      */
-    Vector<sp <AudioInputDescriptor> > getActiveInputs(bool ignoreVirtualInputs = true);
+    Vector<sp <AudioInputDescriptor> > getActiveInputs();
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 030bf4b..986d109 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -22,6 +22,7 @@
 #include <sys/types.h>
 
 #include <system/audio.h>
+#include <system/audio_policy.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -106,7 +107,7 @@
                         audio_port_handle_t preferredDeviceId,
                         audio_source_t source, audio_input_flags_t flags, bool isSoundTrigger) :
         ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
-        mSource(source), mFlags(flags), mIsSoundTrigger(isSoundTrigger), mSilenced(false) {}
+        mSource(source), mFlags(flags), mIsSoundTrigger(isSoundTrigger), mAppState(APP_STATE_IDLE) {}
     ~RecordClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
@@ -115,14 +116,16 @@
     audio_source_t source() const { return mSource; }
     audio_input_flags_t flags() const { return mFlags; }
     bool isSoundTrigger() const { return mIsSoundTrigger; }
-    void setSilenced(bool silenced) { mSilenced = silenced; }
-    bool isSilenced() const { return mSilenced; }
+    void setAppState(app_state_t appState) { mAppState = appState; }
+    app_state_t appState() { return mAppState; }
+    bool isSilenced() const { return mAppState == APP_STATE_IDLE; }
 
 private:
     const audio_source_t mSource;
     const audio_input_flags_t mFlags;
     const bool mIsSoundTrigger;
-          bool mSilenced;
+          app_state_t mAppState;
+
 };
 
 class SourceClientDescriptor: public TrackClientDescriptor
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index eb32959..8ff8238 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -35,7 +35,7 @@
 public:
     IOProfile(const String8 &name, audio_port_role_t role)
         : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
-          maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
+          maxOpenCount(1),
           curOpenCount(0),
           maxActiveCount(1),
           curActiveCount(0) {}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 1f29874..559274f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -53,9 +53,32 @@
     return mId;
 }
 
-audio_source_t AudioInputDescriptor::inputSource(bool activeOnly) const
+audio_source_t AudioInputDescriptor::source() const
 {
-    return getHighestPrioritySource(activeOnly);
+    audio_source_t source = AUDIO_SOURCE_DEFAULT;
+
+    for (bool activeOnly : { true, false }) {
+        int32_t topPriority = -1;
+        app_state_t topState = APP_STATE_IDLE;
+        for (const auto &client : getClientIterable()) {
+            if (activeOnly && !client->active()) {
+                continue;
+            }
+            app_state_t curState = client->appState();
+            if (curState >= topState) {
+                int32_t curPriority = source_priority(client->source());
+                if (curPriority > topPriority) {
+                    source = client->source();
+                    topPriority = curPriority;
+                }
+                topState = curState;
+            }
+        }
+        if (source != AUDIO_SOURCE_DEFAULT) {
+            break;
+        }
+    }
+    return source;
 }
 
 void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -76,7 +99,7 @@
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
     dstConfig->ext.mix.hw_module = getModuleHandle();
     dstConfig->ext.mix.handle = mIoHandle;
-    dstConfig->ext.mix.usecase.source = inputSource();
+    dstConfig->ext.mix.usecase.source = source();
 }
 
 void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
@@ -125,24 +148,6 @@
     return false;
 }
 
-audio_source_t AudioInputDescriptor::getHighestPrioritySource(bool activeOnly) const
-{
-    audio_source_t source = AUDIO_SOURCE_DEFAULT;
-    int32_t priority = -1;
-
-    for (const auto &client : getClientIterable()) {
-        if (activeOnly && !client->active() ) {
-            continue;
-        }
-        int32_t curPriority = source_priority(client->source());
-        if (curPriority > priority) {
-            priority = curPriority;
-            source = client->source();
-        }
-    }
-    return source;
-}
-
 bool AudioInputDescriptor::isSoundTrigger() const {
     // sound trigger and non sound trigger clients are not mixed on a given input
     // so check only first client
@@ -224,7 +229,7 @@
 
 status_t AudioInputDescriptor::start()
 {
-    if (mGlobalActiveCount == 1) {
+    if (!isActive()) {
         if (!mProfile->canStartNewIo()) {
             ALOGI("%s mProfile->curActiveCount %d", __func__, mProfile->curActiveCount);
             return INVALID_OPERATION;
@@ -388,15 +393,13 @@
     return count;
 }
 
-Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs(bool ignoreVirtualInputs)
+Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs()
 {
     Vector<sp <AudioInputDescriptor> > activeInputs;
 
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
-        if ((inputDescriptor->isActive())
-                && (!ignoreVirtualInputs ||
-                    !is_virtual_input_device(inputDescriptor->mDevice))) {
+        if (inputDescriptor->isActive()) {
             activeInputs.add(inputDescriptor);
         }
     }
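The new source() makes two passes, first over active clients only and then over all clients. In the sketch below, FakeClient and fakePriority() are stand-ins for RecordClientDescriptor and source_priority(); it shows that an inactive client, even with a higher-priority source, is only considered when no client is active.

#include <cstdio>
#include <vector>

struct FakeClient { bool active; int appState; int source; };
static int fakePriority(int source) { return source; }  // made-up ordering

static int pickSource(const std::vector<FakeClient>& clients) {
    int picked = 0;                        // stands in for AUDIO_SOURCE_DEFAULT
    const bool passes[] = { true, false };
    for (bool activeOnly : passes) {
        int topPriority = -1;
        int topState = 0;                  // stands in for APP_STATE_IDLE
        for (const FakeClient& c : clients) {
            if (activeOnly && !c.active) continue;
            if (c.appState >= topState) {
                int p = fakePriority(c.source);
                if (p > topPriority) { picked = c.source; topPriority = p; }
                topState = c.appState;
            }
        }
        if (picked != 0) break;            // stop once a source was found
    }
    return picked;
}

int main() {
    // An inactive client with a higher-priority source (7) loses to an active
    // one (3): the active-only pass settles on 3 and the second pass never runs.
    std::vector<FakeClient> clients = {{false, 2, 7}, {true, 1, 3}};
    printf("selected source: %d\n", pickSource(clients));  // prints 3
    return 0;
}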
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index c50839d..f07b797 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -484,7 +484,7 @@
 
     audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(outputDevice, mOutputs);
-    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+    audio_io_handle_t output = selectOutput(outputs);
     // request to reuse existing output stream if one is already opened to reach the target device
     if (output != AUDIO_IO_HANDLE_NONE) {
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
@@ -712,22 +712,25 @@
     ALOGV("setSystemProperty() property %s, value %s", property, value);
 }
 
-// Find a direct output profile compatible with the parameters passed, even if the input flags do
-// not explicitly request a direct output
-sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
-                                                               audio_devices_t device,
-                                                               uint32_t samplingRate,
-                                                               audio_format_t format,
-                                                               audio_channel_mask_t channelMask,
-                                                               audio_output_flags_t flags)
+// Find an output profile compatible with the parameters passed. When "directOnly" is set, restrict
+// search to profiles for direct outputs.
+sp<IOProfile> AudioPolicyManager::getProfileForOutput(
+                                                   audio_devices_t device,
+                                                   uint32_t samplingRate,
+                                                   audio_format_t format,
+                                                   audio_channel_mask_t channelMask,
+                                                   audio_output_flags_t flags,
+                                                   bool directOnly)
 {
-    // only retain flags that will drive the direct output profile selection
-    // if explicitly requested
-    static const uint32_t kRelevantFlags =
-            (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
-             AUDIO_OUTPUT_FLAG_VOIP_RX);
-    flags =
-        (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+    if (directOnly) {
+        // only retain flags that will drive the direct output profile selection
+        // if explicitly requested
+        static const uint32_t kRelevantFlags =
+                (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
+                 AUDIO_OUTPUT_FLAG_VOIP_RX);
+        flags =
+            (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
 
     sp<IOProfile> profile;
 
@@ -744,7 +747,9 @@
             if ((mAvailableOutputDevices.types() & curProfile->getSupportedDevicesType()) == 0) {
                 continue;
             }
-            // if several profiles are compatible, give priority to one with offload capability
+            if (!directOnly) return curProfile;
+            // when searching for direct outputs, if several profiles are compatible, give priority
+            // to one with offload capability
             if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
                 continue;
             }
@@ -769,7 +774,7 @@
     // and AudioSystem::getOutputSamplingRate().
 
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
-    audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+    audio_io_handle_t output = selectOutput(outputs);
 
     ALOGV("getOutput() stream %d selected device %08x, output %d", stream, device, output);
     return output;
@@ -980,11 +985,12 @@
 
     if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
             !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
-        profile = getProfileForDirectOutput(device,
-                                           config->sample_rate,
-                                           config->format,
-                                           config->channel_mask,
-                                           (audio_output_flags_t)*flags);
+        profile = getProfileForOutput(device,
+                                   config->sample_rate,
+                                   config->format,
+                                   config->channel_mask,
+                                   (audio_output_flags_t)*flags,
+                                   true /* directOnly */);
     }
 
     if (profile != 0) {
@@ -1085,7 +1091,8 @@
 
         // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
         *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
-        output = selectOutput(outputs, *flags, config->format);
+        output = selectOutput(outputs, *flags, config->format,
+                config->channel_mask, config->sample_rate);
     }
     ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
             "sampling rate %d, format %#x, channels %#x, flags %#x",
@@ -1246,15 +1253,18 @@
 
 audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                                        audio_output_flags_t flags,
-                                                       audio_format_t format)
+                                                       audio_format_t format,
+                                                       audio_channel_mask_t channelMask,
+                                                       uint32_t samplingRate)
 {
     // select one output among several that provide a path to a particular device or set of
     // devices (the list was previously build by getOutputsForDevice()).
     // The priority is as follows:
-    // 1: the output with the highest number of requested policy flags
-    // 2: the output with the bit depth the closest to the requested one
-    // 3: the primary output
-    // 4: the first output in the list
+    // 1: the output supporting haptic playback when haptic playback is requested
+    // 2: the output with the highest number of requested policy flags
+    // 3: the output with the bit depth the closest to the requested one
+    // 4: the primary output
+    // 5: the first output in the list
 
     if (outputs.size() == 0) {
         return AUDIO_IO_HANDLE_NONE;
@@ -1264,6 +1274,8 @@
     }
 
     int maxCommonFlags = 0;
+    const size_t hapticChannelCount = audio_channel_count_from_out_mask(
+            channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
     audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
     audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
     audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
@@ -1276,6 +1288,24 @@
             if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
                 continue;
             }
+            // If a haptic channel is specified, use the haptic output if present.
+            // When using a haptic output, the same audio format and sample rate are required.
+            if (hapticChannelCount > 0) {
+                // If a haptic channel is specified, use the first output that
+                // supports haptic playback.
+                if (audio_channel_count_from_out_mask(
+                        outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) >= hapticChannelCount
+                        && format == outputDesc->mFormat
+                        && samplingRate == outputDesc->mSamplingRate) {
+                    return output;
+                }
+            } else {
+                // When haptic channel is not specified, skip haptic output.
+                if (outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
+                    continue;
+                }
+            }
+
             // if a valid format is specified, skip output if not compatible
             if (format != AUDIO_FORMAT_INVALID) {
                 if (!audio_is_linear_pcm(format)) {
@@ -1889,7 +1919,38 @@
     }
 
     if (!profile->canOpenNewIo()) {
-        return AUDIO_IO_HANDLE_NONE;
+        for (size_t i = 0; i < mInputs.size(); ) {
+            sp <AudioInputDescriptor> desc = mInputs.valueAt(i);
+            if (desc->mProfile != profile) {
+                i++;
+                continue;
+            }
+            // For sound trigger inputs, reuse the input if it is used by another
+            // sound trigger client on the same session; otherwise, reuse the input
+            // if an active client app is not in the IDLE state.
+            RecordClientVector clients = desc->clientsList();
+            bool doClose = false;
+            for (const auto& client : clients) {
+                if (isSoundTrigger != client->isSoundTrigger()) {
+                    continue;
+                }
+                if (client->isSoundTrigger()) {
+                    if (session == client->session()) {
+                        return desc->mIoHandle;
+                    }
+                    continue;
+                }
+                if (client->active() && client->appState() != APP_STATE_IDLE) {
+                    return desc->mIoHandle;
+                }
+                doClose = true;
+            }
+            if (doClose) {
+                closeInput(desc->mIoHandle);
+            } else {
+                i++;
+            }
+        }
     }
 
     sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
@@ -1930,55 +1991,8 @@
     return input;
 }
 
-//static
-bool AudioPolicyManager::isConcurrentSource(audio_source_t source)
+status_t AudioPolicyManager::startInput(audio_port_handle_t portId)
 {
-    return (source == AUDIO_SOURCE_HOTWORD) ||
-            (source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
-            (source == AUDIO_SOURCE_FM_TUNER);
-}
-
-// FIXME: remove when concurrent capture is ready. This is a hack to work around bug b/63083537.
-bool AudioPolicyManager::soundTriggerSupportsConcurrentCapture() {
-    if (!mHasComputedSoundTriggerSupportsConcurrentCapture) {
-        bool soundTriggerSupportsConcurrentCapture = false;
-        unsigned int numModules = 0;
-        struct sound_trigger_module_descriptor* nModules = NULL;
-
-        status_t status = SoundTrigger::listModules(nModules, &numModules);
-        if (status == NO_ERROR && numModules != 0) {
-            nModules = (struct sound_trigger_module_descriptor*) calloc(
-                    numModules, sizeof(struct sound_trigger_module_descriptor));
-            if (nModules == NULL) {
-              // We failed to malloc the buffer, so just say no for now, and hope that we have more
-              // ram the next time this function is called.
-              ALOGE("Failed to allocate buffer for module descriptors");
-              return false;
-            }
-
-            status = SoundTrigger::listModules(nModules, &numModules);
-            if (status == NO_ERROR) {
-                soundTriggerSupportsConcurrentCapture = true;
-                for (size_t i = 0; i < numModules; ++i) {
-                    soundTriggerSupportsConcurrentCapture &=
-                            nModules[i].properties.concurrent_capture;
-                }
-            }
-            free(nModules);
-        }
-        mSoundTriggerSupportsConcurrentCapture = soundTriggerSupportsConcurrentCapture;
-        mHasComputedSoundTriggerSupportsConcurrentCapture = true;
-    }
-    return mSoundTriggerSupportsConcurrentCapture;
-}
-
-
-status_t AudioPolicyManager::startInput(audio_port_handle_t portId,
-                                        bool silenced,
-                                        concurrency_type__mask_t *concurrency)
-{
-    *concurrency = API_INPUT_CONCURRENCY_NONE;
-
     ALOGV("%s portId %d", __FUNCTION__, portId);
 
     sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
@@ -1995,106 +2009,16 @@
 
     audio_session_t session = client->session();
 
-    ALOGV("%s input:%d, session:%d, silenced:%d, concurrency:%d)",
-        __FUNCTION__, input, session, silenced, *concurrency);
+    ALOGV("%s input:%d, session:%d)", __FUNCTION__, input, session);
 
-    if (!is_virtual_input_device(inputDesc->mDevice)) {
-        if (mCallTxPatch != 0 &&
-            inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
-            ALOGW("startInput(%d) failed: call in progress", input);
-            *concurrency |= API_INPUT_CONCURRENCY_CALL;
-            return INVALID_OPERATION;
-        }
+    Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs();
 
-        Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs();
-
-        // If a UID is idle and records silence and another not silenced recording starts
-        // from another UID (idle or active) we stop the current idle UID recording in
-        // favor of the new one - "There can be only one" TM
-        if (!silenced) {
-            for (const auto& activeDesc : activeInputs) {
-                if ((activeDesc->getAudioPort()->getFlags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
-                        activeDesc->getId() == inputDesc->getId()) {
-                     continue;
-                }
-
-                RecordClientVector activeClients = activeDesc->clientsList(true /*activeOnly*/);
-                for (const auto& activeClient : activeClients) {
-                    if (activeClient->isSilenced()) {
-                        closeClient(activeClient->portId());
-                        ALOGV("%s client %d stopping silenced client %d", __FUNCTION__,
-                              portId, activeClient->portId());
-                        activeInputs = mInputs.getActiveInputs();
-                    }
-                }
-            }
-        }
-
-        for (const auto& activeDesc : activeInputs) {
-            if ((client->flags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
-                    activeDesc->getId() == inputDesc->getId()) {
-                continue;
-            }
-
-            audio_source_t activeSource = activeDesc->inputSource(true);
-            if (client->source() == AUDIO_SOURCE_HOTWORD) {
-                if (activeSource == AUDIO_SOURCE_HOTWORD) {
-                    if (activeDesc->hasPreemptedSession(session)) {
-                        ALOGW("%s input %d failed for HOTWORD: "
-                                "other input %d already started for HOTWORD", __FUNCTION__,
-                              input, activeDesc->mIoHandle);
-                        *concurrency |= API_INPUT_CONCURRENCY_HOTWORD;
-                        return INVALID_OPERATION;
-                    }
-                } else {
-                    ALOGV("%s input %d failed for HOTWORD: other input %d already started",
-                        __FUNCTION__, input, activeDesc->mIoHandle);
-                    *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
-                    return INVALID_OPERATION;
-                }
-            } else {
-                if (activeSource != AUDIO_SOURCE_HOTWORD) {
-                    ALOGW("%s input %d failed: other input %d already started", __FUNCTION__,
-                          input, activeDesc->mIoHandle);
-                    *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
-                    return INVALID_OPERATION;
-                }
-            }
-        }
-
-        // We only need to check if the sound trigger session supports concurrent capture if the
-        // input is also a sound trigger input. Otherwise, we should preempt any hotword stream
-        // that's running.
-        const bool allowConcurrentWithSoundTrigger =
-            inputDesc->isSoundTrigger() ? soundTriggerSupportsConcurrentCapture() : false;
-
-        // if capture is allowed, preempt currently active HOTWORD captures
-        for (const auto& activeDesc : activeInputs) {
-            if (allowConcurrentWithSoundTrigger && activeDesc->isSoundTrigger()) {
-                continue;
-            }
-            RecordClientVector activeHotwordClients =
-                activeDesc->clientsList(true, AUDIO_SOURCE_HOTWORD);
-            if (activeHotwordClients.size() > 0) {
-                SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
-
-                for (const auto& activeClient : activeHotwordClients) {
-                    *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
-                    sessions.add(activeClient->session());
-                    closeClient(activeClient->portId());
-                    ALOGV("%s input %d for HOTWORD preempting HOTWORD input %d", __FUNCTION__,
-                          input, activeDesc->mIoHandle);
-                }
-
-                inputDesc->setPreemptedSessions(sessions);
-            }
-        }
+    status_t status = inputDesc->start();
+    if (status != NO_ERROR) {
+        return status;
     }
 
-    // Make sure we start with the correct silence state
-    client->setSilenced(silenced);
-
-    // increment activity count before calling getNewInputDevice() below as only active sessions
+    // increment activity count before calling getNewInputDevice() below as only active sessions
     // are considered for device selection
     inputDesc->setClientActive(client, true);
 
@@ -2103,12 +2027,6 @@
     audio_devices_t device = getNewInputDevice(inputDesc);
     setInputDevice(input, device, true /* force */);
 
-    status_t status = inputDesc->start();
-    if (status != NO_ERROR) {
-        inputDesc->setClientActive(client, false);
-        return status;
-    }
-
     if (inputDesc->activeCount()  == 1) {
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((inputDesc->mPolicyMix != NULL)
@@ -2327,7 +2245,7 @@
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
+        audio_devices_t curDevice = desc->device();
         for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
             if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
                 continue;
@@ -2345,7 +2263,7 @@
             bool applyVolume;
             if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
                 curStreamDevice |= device;
-                applyVolume = (curDevice & curStreamDevice) != 0;
+                applyVolume = (Volume::getDeviceForVolume(curDevice) & curStreamDevice) != 0;
             } else {
                 applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
                         stream, curStreamDevice);
@@ -2803,15 +2721,34 @@
 
     // See if there is a profile to support this.
     // AUDIO_DEVICE_NONE
-    sp<IOProfile> profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */,
+    sp<IOProfile> profile = getProfileForOutput(AUDIO_DEVICE_NONE /*ignore device */,
                                             offloadInfo.sample_rate,
                                             offloadInfo.format,
                                             offloadInfo.channel_mask,
-                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,
+                                            true /* directOnly */);
     ALOGV("isOffloadSupported() profile %sfound", profile != 0 ? "" : "NOT ");
     return (profile != 0);
 }
 
+bool AudioPolicyManager::isDirectOutputSupported(const audio_config_base_t& config,
+                                                 const audio_attributes_t& attributes) {
+    audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
+    audio_attributes_flags_to_audio_output_flags(attributes.flags, output_flags);
+    sp<IOProfile> profile = getProfileForOutput(AUDIO_DEVICE_NONE /*ignore device */,
+                                            config.sample_rate,
+                                            config.format,
+                                            config.channel_mask,
+                                            output_flags,
+                                            true /* directOnly */);
+    ALOGV("%s() profile %sfound with name: %s, "
+        "sample rate: %u, format: 0x%x, channel_mask: 0x%x, output flags: 0x%x",
+        __FUNCTION__, profile != 0 ? "" : "NOT ",
+        (profile != 0 ? profile->getTagName().string() : "null"),
+        config.sample_rate, config.format, config.channel_mask, output_flags);
+    return (profile != 0);
+}
+
 status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
@@ -3123,9 +3060,7 @@
                                             getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
                     // if the sink device is reachable via an opened output stream, request to go via
                     // this output stream by adding a second source to the patch description
-                    audio_io_handle_t output = selectOutput(outputs,
-                                                            AUDIO_OUTPUT_FLAG_NONE,
-                                                            AUDIO_FORMAT_INVALID);
+                    audio_io_handle_t output = selectOutput(outputs);
                     if (output != AUDIO_IO_HANDLE_NONE) {
                         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
                         if (outputDesc->isDuplicated()) {
@@ -3366,7 +3301,7 @@
     SortedVector<audio_io_handle_t> inputsToClose;
     for (size_t i = 0; i < mInputs.size(); i++) {
         sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
-        if (affectedSources.indexOf(inputDesc->inputSource()) >= 0) {
+        if (affectedSources.indexOf(inputDesc->source()) >= 0) {
             inputsToClose.add(inputDesc->mIoHandle);
         }
     }
@@ -3469,8 +3404,7 @@
         //   create Hwoutput and add to mHwOutputs
     } else {
         SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(sinkDevice, mOutputs);
-        audio_io_handle_t output =
-                selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+        audio_io_handle_t output = selectOutput(outputs);
         if (output == AUDIO_IO_HANDLE_NONE) {
             ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevice);
             return INVALID_OPERATION;
@@ -3728,21 +3662,37 @@
 void AudioPolicyManager::setAppState(uid_t uid, app_state_t state)
 {
     Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
-    bool silenced = state == APP_STATE_IDLE;
 
-    ALOGV("AudioPolicyManager:setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
+    ALOGV("%s(uid:%d, state:%d)", __func__, uid, state);
 
     for (size_t i = 0; i < activeInputs.size(); i++) {
         sp<AudioInputDescriptor> activeDesc = activeInputs[i];
         RecordClientVector clients = activeDesc->clientsList(true /*activeOnly*/);
         for (const auto& client : clients) {
             if (uid == client->uid()) {
-                client->setSilenced(silenced);
+                client->setAppState(state);
             }
         }
     }
 }
 
+bool AudioPolicyManager::isHapticPlaybackSupported()
+{
+    for (const auto& hwModule : mHwModules) {
+        const OutputProfileCollection &outputProfiles = hwModule->getOutputProfiles();
+        for (const auto &outProfile : outputProfiles) {
+            struct audio_port audioPort;
+            outProfile->toAudioPort(&audioPort);
+            for (size_t i = 0; i < audioPort.num_channel_masks; i++) {
+                if (audioPort.channel_masks[i] & AUDIO_CHANNEL_HAPTIC_ALL) {
+                    return true;
+                }
+            }
+        }
+    }
+    return false;
+}
+
 status_t AudioPolicyManager::disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
     ALOGV("%s port Id %d", __FUNCTION__, sourceDesc->portId());
@@ -3847,8 +3797,7 @@
     mBeaconMuted(false),
     mTtsOutputAvailable(false),
     mMasterMono(false),
-    mMusicEffectOutput(AUDIO_IO_HANDLE_NONE),
-    mHasComputedSoundTriggerSupportsConcurrentCapture(false)
+    mMusicEffectOutput(AUDIO_IO_HANDLE_NONE)
 {
 }
 
@@ -4901,7 +4850,7 @@
 
     // If we are not in call and no client is active on this input, this methods returns
     // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
-    audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
+    audio_source_t source = inputDesc->source();
     if (source == AUDIO_SOURCE_DEFAULT && isInCall()) {
         source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     }
@@ -5240,20 +5189,6 @@
             }
             installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
         }
-
-        // inform all input as well
-        for (size_t i = 0; i < mInputs.size(); i++) {
-            const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
-            if (!is_virtual_input_device(inputDescriptor->mDevice)) {
-                AudioParameter inputCmd = AudioParameter();
-                ALOGV("%s: inform input %d of device:%d", __func__,
-                      inputDescriptor->mIoHandle, device);
-                inputCmd.addInt(String8(AudioParameter::keyRouting),device);
-                mpClientInterface->setParameters(inputDescriptor->mIoHandle,
-                                                 inputCmd.toString(),
-                                                 delayMs);
-            }
-        }
     }
 
     // update stream volumes according to new device
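To make the new first rule of selectOutput() concrete, here is a simplified restatement using plain structs in place of SwAudioOutputDescriptor: when the request carries haptic channels, the first output whose haptic channel count, format, and sample rate all match wins outright; when no haptics are requested, haptic-capable outputs are skipped.

#include <cstdint>
#include <cstdio>
#include <vector>

struct FakeOutput {
    int handle;
    uint32_t hapticChannels;  // haptic channels the output exposes
    uint32_t format;          // stand-in for audio_format_t
    uint32_t samplingRate;
};

static int pickForHaptics(const std::vector<FakeOutput>& outputs,
                          uint32_t wantedHaptics, uint32_t format, uint32_t rate) {
    for (const FakeOutput& o : outputs) {
        if (wantedHaptics > 0) {
            if (o.hapticChannels >= wantedHaptics && o.format == format &&
                    o.samplingRate == rate) {
                return o.handle;  // haptic-capable output wins outright
            }
        } else if (o.hapticChannels > 0) {
            continue;             // no haptics requested: skip haptic outputs
        }
        // ... the remaining flag/format/primary ranking would follow here ...
    }
    return -1;
}

int main() {
    std::vector<FakeOutput> outputs = {{1, 0, 1, 48000}, {2, 2, 1, 48000}};
    printf("selected output: %d\n", pickForHaptics(outputs, 2, 1, 48000));  // prints 2
    return 0;
}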
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 8618e0c..d0708b8 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -134,9 +134,7 @@
                                          audio_port_handle_t *portId);
 
         // indicates to the audio policy manager that the input starts being used.
-        virtual status_t startInput(audio_port_handle_t portId,
-                                    bool silenced,
-                                    concurrency_type__mask_t *concurrency);
+        virtual status_t startInput(audio_port_handle_t portId);
 
         // indicates to the audio policy manager that the input stops being used.
         virtual status_t stopInput(audio_port_handle_t portId);
@@ -194,6 +192,9 @@
 
         virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
 
+        virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                             const audio_attributes_t& attributes);
+
         virtual status_t listAudioPorts(audio_port_role_t role,
                                         audio_port_type_t type,
                                         unsigned int *num_ports,
@@ -246,6 +247,8 @@
 
         virtual void setAppState(uid_t uid, app_state_t state);
 
+        virtual bool isHapticPlaybackSupported();
+
 protected:
         // A constructor that allows more fine-grained control over initialization process,
         // used in automatic tests.
@@ -472,8 +475,10 @@
                                             uint32_t delayMs);
 
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
-                                       audio_output_flags_t flags,
-                                       audio_format_t format);
+                                       audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                       audio_format_t format = AUDIO_FORMAT_INVALID,
+                                       audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE,
+                                       uint32_t samplingRate = 0);
         // samplingRate, format, channelMask are in/out and so may be modified
         sp<IOProfile> getInputProfile(audio_devices_t device,
                                       const String8& address,
@@ -481,11 +486,12 @@
                                       audio_format_t& format,
                                       audio_channel_mask_t& channelMask,
                                       audio_input_flags_t flags);
-        sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
-                                                       uint32_t samplingRate,
-                                                       audio_format_t format,
-                                                       audio_channel_mask_t channelMask,
-                                                       audio_output_flags_t flags);
+        sp<IOProfile> getProfileForOutput(audio_devices_t device,
+                                          uint32_t samplingRate,
+                                          audio_format_t format,
+                                          audio_channel_mask_t channelMask,
+                                          audio_output_flags_t flags,
+                                          bool directOnly);
 
         audio_io_handle_t selectOutputForMusicEffects();
 
@@ -544,8 +550,6 @@
 
         void clearAudioSources(uid_t uid);
 
-        static bool isConcurrentSource(audio_source_t source);
-
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
 
@@ -709,10 +713,6 @@
                 int delayMs,
                 uid_t uid,
                 sp<AudioPatch> *patchDescPtr);
-
-        bool soundTriggerSupportsConcurrentCapture();
-        bool mSoundTriggerSupportsConcurrentCapture;
-        bool mHasComputedSoundTriggerSupportsConcurrentCapture;
 };
 
 };
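
A note on the header changes above: selectOutput() gains channel-mask and sampling-rate
parameters plus default values for all optional arguments, so existing call sites keep
compiling while new callers can narrow the selection, and getProfileForDirectOutput()
becomes getProfileForOutput() with an explicit directOnly flag. A minimal sketch of how a
caller might use the new declarations (variable names are illustrative, not from the patch):

    // Equivalent of the former direct-only profile lookup, via the renamed helper.
    sp<IOProfile> profile = getProfileForOutput(device, samplingRate, format,
                                                channelMask, flags,
                                                true /*directOnly*/);

    // With the new default arguments, selectOutput() can still be called with
    // only the candidate list.
    audio_io_handle_t output = selectOutput(outputs);
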
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 59c8f10..439764b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -454,23 +454,6 @@
     return rawbuffer;
 }
 
-static std::string audioConcurrencyString(
-        AudioPolicyInterface::concurrency_type__mask_t concurrency)
-{
-    char buffer[64]; // oversized
-    if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL) {
-        snprintf(buffer, sizeof(buffer), "%s%s%s%s",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL)? ",call":"",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE)? ",capture":"",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_HOTWORD)? ",hotword":"",
-            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_PREEMPT)? ",preempt":"");
-    } else {
-        snprintf(buffer, sizeof(buffer), ",none");
-    }
-
-    return &buffer[1];
-}
-
 std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
     std::string typeStr;
     struct audio_port port = {};
@@ -482,7 +465,7 @@
     return typeStr;
 }
 
-status_t AudioPolicyService::startInput(audio_port_handle_t portId, bool *silenced)
+status_t AudioPolicyService::startInput(audio_port_handle_t portId)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -505,17 +488,16 @@
         return PERMISSION_DENIED;
     }
 
-    // If UID inactive it records silence until becoming active
-    *silenced = !mUidPolicy->isUidActive(client->uid) && !client->isVirtualDevice;
-
     Mutex::Autolock _l(mLock);
-    AudioPolicyInterface::concurrency_type__mask_t concurrency =
-            AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
+
+    client->active = true;
+    client->startTimeNs = systemTime();
+    updateUidStates_l();
 
     status_t status;
     {
         AutoCallerClear acc;
-        status = mAudioPolicyManager->startInput(portId, *silenced, &concurrency);
+        status = mAudioPolicyManager->startInput(portId);
 
     }
 
@@ -524,7 +506,6 @@
 
         static constexpr char kAudioPolicy[] = "audiopolicy";
 
-        static constexpr char kAudioPolicyReason[] = "android.media.audiopolicy.reason";
         static constexpr char kAudioPolicyStatus[] = "android.media.audiopolicy.status";
         static constexpr char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
         static constexpr char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
@@ -541,7 +522,6 @@
         MediaAnalyticsItem *item = new MediaAnalyticsItem(kAudioPolicy);
         if (item != NULL) {
 
-            item->setCString(kAudioPolicyReason, audioConcurrencyString(concurrency).c_str());
             item->setInt32(kAudioPolicyStatus, status);
 
             item->setCString(kAudioPolicyRqstSrc,
@@ -556,54 +536,35 @@
             item->setCString(
                     kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
 
-            // figure out who is active
-            // NB: might the other party have given up the microphone since then? how sure.
-            // perhaps could have given up on it.
-            // we hold mLock, so perhaps we're safe for this looping
-            if (concurrency != AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE) {
-                int count = mAudioRecordClients.size();
-                for (int i = 0; i<count ; i++) {
-                    if (portId == mAudioRecordClients.keyAt(i)) {
-                        continue;
+            int count = mAudioRecordClients.size();
+            for (int i = 0; i < count ; i++) {
+                if (portId == mAudioRecordClients.keyAt(i)) {
+                    continue;
+                }
+                sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
+                if (other->active) {
+                    // keeps the last of the clients marked active
+                    item->setCString(kAudioPolicyActiveSrc,
+                                     audioSourceString(other->attributes.source).c_str());
+                    item->setInt32(kAudioPolicyActiveSession, other->session);
+                    if (other->opPackageName.size() != 0) {
+                        item->setCString(kAudioPolicyActivePkg,
+                             std::string(String8(other->opPackageName).string()).c_str());
+                    } else {
+                        item->setCString(kAudioPolicyRqstPkg,
+                                         std::to_string(other->uid).c_str());
                     }
-                    sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
-                    if (other->active) {
-                        // keeps the last of the clients marked active
-                        item->setCString(kAudioPolicyActiveSrc,
-                                         audioSourceString(other->attributes.source).c_str());
-                        item->setInt32(kAudioPolicyActiveSession, other->session);
-                        if (other->opPackageName.size() != 0) {
-                            item->setCString(kAudioPolicyActivePkg,
-                                 std::string(String8(other->opPackageName).string()).c_str());
-                        } else {
-                            item->setCString(kAudioPolicyRqstPkg,
-                                             std::to_string(other->uid).c_str());
-                        }
-                        item->setCString(kAudioPolicyActiveDevice,
-                                         getDeviceTypeStrForPortId(other->deviceId).c_str());
-                    }
+                    item->setCString(kAudioPolicyActiveDevice,
+                                     getDeviceTypeStrForPortId(other->deviceId).c_str());
                 }
             }
             item->selfrecord();
             delete item;
             item = NULL;
         }
-    }
-
-    if (status == NO_ERROR) {
-        LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
-                            "startInput(): invalid concurrency type %d", (int)concurrency);
-
-        // enforce permission (if any) required for each type of concurrency
-        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
-            //TODO: check incall capture permission
-        }
-        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
-            //TODO: check concurrent capture permission
-        }
-
-        client->active = true;
-    } else {
+        client->active = false;
+        client->startTimeNs = 0;
+        updateUidStates_l();
         finishRecording(client->opPackageName, client->uid);
     }
 
@@ -615,6 +576,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+
     Mutex::Autolock _l(mLock);
 
     ssize_t index = mAudioRecordClients.indexOfKey(portId);
@@ -624,6 +586,9 @@
     sp<AudioRecordClient> client = mAudioRecordClients.valueAt(index);
 
     client->active = false;
+    client->startTimeNs = 0;
+
+    updateUidStates_l();
 
     // finish the recording app op
     finishRecording(client->opPackageName, client->uid);
@@ -646,6 +611,14 @@
             return;
         }
         client = mAudioRecordClients.valueAt(index);
+
+        if (client->active) {
+            ALOGW("%s releasing active client portId %d", __FUNCTION__, portId);
+            client->active = false;
+            client->startTimeNs = 0;
+            updateUidStates_l();
+        }
+
         mAudioRecordClients.removeItem(portId);
     }
     if (client == 0) {
@@ -936,6 +909,17 @@
     return mAudioPolicyManager->isOffloadSupported(info);
 }
 
+bool AudioPolicyService::isDirectOutputSupported(const audio_config_base_t& config,
+                                                 const audio_attributes_t& attributes) {
+    if (mAudioPolicyManager == NULL) {
+        ALOGV("mAudioPolicyManager == NULL");
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->isDirectOutputSupported(config, attributes);
+}
+
 status_t AudioPolicyService::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
@@ -1149,4 +1133,15 @@
     return NO_ERROR;
 }
 
+bool AudioPolicyService::isHapticPlaybackSupported()
+{
+    if (mAudioPolicyManager == NULL) {
+        ALOGW("%s, mAudioPolicyManager == NULL", __func__);
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    return mAudioPolicyManager->isHapticPlaybackSupported();
+}
+
 } // namespace android
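
Both queries added to the service above, isDirectOutputSupported() and
isHapticPlaybackSupported(), follow the same guarded-delegation pattern: return false when
the policy manager is not initialized, take mLock, then forward to AudioPolicyManager. A
hedged client-side sketch (the AudioSystem wrapper named here is an assumption for
illustration; only the service methods above are part of this patch):

    // Probe for a direct output path before requesting AUDIO_OUTPUT_FLAG_DIRECT.
    audio_config_base_t config = {};
    config.sample_rate = 48000;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    config.format = AUDIO_FORMAT_PCM_16_BIT;
    audio_attributes_t attr = {};
    attr.usage = AUDIO_USAGE_MEDIA;
    attr.content_type = AUDIO_CONTENT_TYPE_MUSIC;
    const bool canGoDirect = AudioSystem::isDirectOutputSupported(config, attr);
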
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 78dbf5f..ee5d6ff 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -348,11 +348,100 @@
 
 void AudioPolicyService::updateUidStates_l()
 {
-    //TODO: implement real concurrent capture policy: for now just apply each app state directly
+//    Go over all active clients and allow capture (does not force silence) in the
+//    following cases:
+//    The client is the assistant
+//        AND an accessibility service is on TOP
+//            AND the source is VOICE_RECOGNITION or HOTWORD
+//        OR no accessibility service is on TOP
+//            AND uses VOICE_RECOGNITION AND is on TOP OR latest started
+//                OR uses HOTWORD
+//            AND there is no privacy-sensitive active capture
+//    OR The client is an accessibility service
+//        AND is on TOP OR latest started
+//        AND the source is VOICE_RECOGNITION or HOTWORD
+//    OR Any other client
+//        AND the assistant is not on TOP
+//        AND is on TOP OR latest started
+//        AND there is no privacy-sensitive active capture
+//TODO: manage pre-processing effects according to use case priority
+
+    sp<AudioRecordClient> topActive;
+    sp<AudioRecordClient> latestActive;
+    sp<AudioRecordClient> latestSensitiveActive;
+    nsecs_t topStartNs = 0;
+    nsecs_t latestStartNs = 0;
+    nsecs_t latestSensitiveStartNs = 0;
+    bool isA11yOnTop = mUidPolicy->isA11yOnTop();
+    bool isAssistantOnTop = false;
+    bool isSensitiveActive = false;
+
     for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
         if (!current->active) continue;
-        setAppState_l(current->uid, apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
+        if (isPrivacySensitive(current->attributes.source)) {
+            if (current->startTimeNs > latestSensitiveStartNs) {
+                latestSensitiveActive = current;
+                latestSensitiveStartNs = current->startTimeNs;
+            }
+            isSensitiveActive = true;
+        }
+        if (mUidPolicy->getUidState(current->uid) == ActivityManager::PROCESS_STATE_TOP) {
+            if (current->startTimeNs > topStartNs) {
+                topActive = current;
+                topStartNs = current->startTimeNs;
+            }
+            if (mUidPolicy->isAssistantUid(current->uid)) {
+                isAssistantOnTop = true;
+            }
+        }
+        if (current->startTimeNs > latestStartNs) {
+            latestActive = current;
+            latestStartNs = current->startTimeNs;
+        }
+    }
+
+    if (topActive == nullptr && latestActive == nullptr) {
+        return;
+    }
+
+    if (topActive != nullptr) {
+        latestActive = nullptr;
+    }
+
+    for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
+        sp<AudioRecordClient> current = mAudioRecordClients[i];
+        if (!current->active) continue;
+
+        audio_source_t source = current->attributes.source;
+        bool isOnTop = current == topActive;
+        bool isLatest = current == latestActive;
+        bool isLatestSensitive = current == latestSensitiveActive;
+        bool forceIdle = true;
+        if (mUidPolicy->isAssistantUid(current->uid)) {
+            if (isA11yOnTop) {
+                if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
+                    forceIdle = false;
+                }
+            } else {
+                if ((((isOnTop || isLatest) && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+                     source == AUDIO_SOURCE_HOTWORD) && !isSensitiveActive) {
+                    forceIdle = false;
+                }
+            }
+        } else if (mUidPolicy->isA11yUid(current->uid)) {
+            if ((isOnTop || isLatest) &&
+                (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
+                forceIdle = false;
+            }
+        } else {
+            if (!isAssistantOnTop && (isOnTop || isLatest) &&
+                (!isSensitiveActive || isLatestSensitive)) {
+                forceIdle = false;
+            }
+        }
+        setAppState_l(current->uid,
+                      forceIdle ? APP_STATE_IDLE :
+                                  apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
     }
 }
 
@@ -369,6 +458,22 @@
     return APP_STATE_FOREGROUND;
 }
 
+/* static */
+bool AudioPolicyService::isPrivacySensitive(audio_source_t source)
+{
+    switch (source) {
+        case AUDIO_SOURCE_VOICE_UPLINK:
+        case AUDIO_SOURCE_VOICE_DOWNLINK:
+        case AUDIO_SOURCE_VOICE_CALL:
+        case AUDIO_SOURCE_CAMCORDER:
+        case AUDIO_SOURCE_VOICE_COMMUNICATION:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
 void AudioPolicyService::setAppState_l(uid_t uid, app_state_t state)
 {
     AutoCallerClear acc;
@@ -548,6 +653,7 @@
         mObserverRegistered = true;
     } else {
         ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
+
         am.unregisterUidObserver(this);
     }
 }
@@ -650,6 +756,7 @@
         mCachedUids.insert(std::pair<uid_t,
                            std::pair<bool, int>>(uid, std::pair<bool, int>(active, state)));
     }
+
     return state;
 }
 
@@ -730,6 +837,21 @@
     }
 }
 
+bool AudioPolicyService::UidPolicy::isA11yOnTop() {
+    for (const auto &uid : mCachedUids) {
+        std::vector<uid_t>::iterator it = find(mA11yUids.begin(), mA11yUids.end(), uid.first);
+        if (it == mA11yUids.end()) {
+            continue;
+        }
+        if (uid.second.second == ActivityManager::PROCESS_STATE_TOP ||
+            uid.second.second == ActivityManager::PROCESS_STATE_FOREGROUND_SERVICE ||
+            uid.second.second == ActivityManager::PROCESS_STATE_BOUND_FOREGROUND_SERVICE) {
+            return true;
+        }
+    }
+    return false;
+}
+
 bool AudioPolicyService::UidPolicy::isA11yUid(uid_t uid)
 {
     std::vector<uid_t>::iterator it = find(mA11yUids.begin(), mA11yUids.end(), uid);
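
The arbitration added in updateUidStates_l() reduces each active record client to a single
allow/silence decision. For the common "any other client" branch the predicate can be
restated as below (an illustrative condensation of the loop above, not a literal excerpt);
when it is false the client is forced to APP_STATE_IDLE and captures silence:

    // Condensed "any other client" rule from updateUidStates_l() (illustration only).
    bool allowed = !isAssistantOnTop                          // assistant is not on TOP
            && (isOnTop || isLatest)                          // client is on TOP or latest started
            && (!isSensitiveActive || isLatestSensitive);     // no competing sensitive capture
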
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 4d7235f..23c3daa 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -94,8 +94,7 @@
                                      audio_input_flags_t flags,
                                      audio_port_handle_t *selectedDeviceId = NULL,
                                      audio_port_handle_t *portId = NULL);
-    virtual status_t startInput(audio_port_handle_t portId,
-                                bool *silenced);
+    virtual status_t startInput(audio_port_handle_t portId);
     virtual status_t stopInput(audio_port_handle_t portId);
     virtual void releaseInput(audio_port_handle_t portId);
     virtual status_t initStreamVolume(audio_stream_type_t stream,
@@ -168,6 +167,8 @@
                                      int delayMs = 0);
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
+    virtual bool isDirectOutputSupported(const audio_config_base_t& config,
+                                         const audio_attributes_t& attributes);
 
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
@@ -217,6 +218,8 @@
     virtual status_t setAssistantUid(uid_t uid);
     virtual status_t setA11yServicesUids(const std::vector<uid_t>& uids);
 
+    virtual bool     isHapticPlaybackSupported();
+
             status_t doStopOutput(audio_port_handle_t portId);
             void doReleaseOutput(audio_port_handle_t portId);
 
@@ -276,6 +279,8 @@
     void updateUidStates();
     void updateUidStates_l();
 
+    static bool isPrivacySensitive(audio_source_t source);
+
     // If recording we need to make sure the UID is allowed to do that. If the UID is idle
     // then it cannot record and gets buffers with zeros - silence. As soon as the UID
     // transitions to an active state we will start reporting buffers with data. This approach
@@ -299,6 +304,7 @@
         bool isAssistantUid(uid_t uid) { return uid == mAssistantUid; }
         void setA11yUids(const std::vector<uid_t>& uids) { mA11yUids.clear(); mA11yUids = uids; }
         bool isA11yUid(uid_t uid);
+        bool isA11yOnTop();
 
         // BnUidObserver implementation
         void onUidActive(uid_t uid) override;
@@ -650,12 +656,11 @@
                           const audio_session_t session, const audio_port_handle_t deviceId,
                           const String16& opPackageName) :
                     AudioClient(attributes, io, uid, pid, session, deviceId),
-                    opPackageName(opPackageName), isConcurrent(false), isVirtualDevice(false) {}
+                    opPackageName(opPackageName), startTimeNs(0) {}
                 ~AudioRecordClient() override = default;
 
         const String16 opPackageName;        // client package name
-        bool isConcurrent;             // is allowed to concurrent capture
-        bool isVirtualDevice;          // uses virtual device: updated by APM::getInputForAttr()
+        nsecs_t startTimeNs;
     };
 
     // --- AudioPlaybackClient ---
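
With the concurrency flags gone, AudioRecordClient only needs to remember when it started:
startTimeNs is stamped from systemTime() in startInput(), cleared in stopInput() and
releaseInput(), and used by updateUidStates_l() to break ties in favor of the latest-started
client. A condensed sketch of that lifecycle, pieced together from the calls earlier in
this patch:

    // On startInput():
    client->active = true;
    client->startTimeNs = systemTime();   // monotonic nanoseconds
    updateUidStates_l();

    // On stopInput()/releaseInput(), or when startInput() fails:
    client->active = false;
    client->startTimeNs = 0;
    updateUidStates_l();
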
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 73c9535..19ce7e9 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -9,6 +9,24 @@
 
 LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
 LOCAL_MODULE:= libmediaextractorservice
+
+sanitizer_runtime_libraries := $(call normalize-path-list,$(addsuffix .so,\
+  $(ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
+  $(UBSAN_RUNTIME_LIBRARY) \
+  $(TSAN_RUNTIME_LIBRARY)))
+
+# $(info Sanitizer:  $(sanitizer_runtime_libraries))
+
+ndk_libraries := $(call normalize-path-list,$(addprefix lib,$(addsuffix .so,\
+  $(NDK_PREBUILT_SHARED_LIBRARIES))))
+
+# $(info NDK:  $(ndk_libraries))
+
+LOCAL_CFLAGS += -DLINKED_LIBRARIES='"$(sanitizer_runtime_libraries):$(ndk_libraries)"'
+
+sanitizer_runtime_libraries :=
+ndk_libraries :=
+
 include $(BUILD_SHARED_LIBRARY)
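
The make fragment above collects the basenames of the sanitizer runtime libraries and the
NDK prebuilt shared libraries into one colon-separated string and bakes it into the service
through the LINKED_LIBRARIES define; the new MediaExtractorService constructor below passes
that string to MediaExtractorFactory::SetLinkedLibraries(). Roughly (the library names here
are examples, not actual build output):

    // Hypothetical expansion seen by the compiler:
    //   -DLINKED_LIBRARIES='"libclang_rt.asan-arm-android.so:libc.so:liblog.so"'
    // which the constructor consumes as an ordinary string literal:
    const std::string linked(LINKED_LIBRARIES);
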
 
 
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index f4d8b43..8b26178 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -29,6 +29,11 @@
 
 namespace android {
 
+MediaExtractorService::MediaExtractorService()
+        : BnMediaExtractorService() {
+    MediaExtractorFactory::SetLinkedLibraries(std::string(LINKED_LIBRARIES));
+}
+
 sp<IMediaExtractor> MediaExtractorService::makeExtractor(
         const sp<IDataSource> &remoteSource, const char *mime) {
     ALOGV("@@@ MediaExtractorService::makeExtractor for %s", mime);
diff --git a/services/mediaextractor/MediaExtractorService.h b/services/mediaextractor/MediaExtractorService.h
index 9df3ecd..6007004 100644
--- a/services/mediaextractor/MediaExtractorService.h
+++ b/services/mediaextractor/MediaExtractorService.h
@@ -27,7 +27,7 @@
 {
     friend class BinderService<MediaExtractorService>;    // for MediaExtractorService()
 public:
-    MediaExtractorService() : BnMediaExtractorService() { }
+    MediaExtractorService();
     virtual ~MediaExtractorService() { }
     virtual void onFirstRef() { }