Merge "Remove __ANDROID_API__ #if checks."
diff --git a/METADATA b/METADATA
index d97975c..1fbda08 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# *** THIS PACKAGE HAS SPECIAL LICENSING CONDITIONS.  PLEASE
+#     CONSULT THE OWNERS AND opensource-licensing@google.com BEFORE
+#     DEPENDING ON IT IN YOUR PROJECT. ***
 third_party {
-  license_type: NOTICE
+  # would be NOTICE save for drm/mediadrm/plugins/clearkey/hidl/
+  license_type: BY_EXCEPTION_ONLY
 }
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 6111fea..1d2562e 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -39,6 +39,8 @@
 
     static constexpr size_t kMinAllocBytesForEviction = 1024*1024*15;
     static constexpr size_t kMinBufferCountForEviction = 25;
+    static constexpr size_t kMaxUnusedBufferCount = 64;
+    static constexpr size_t kUnusedBufferCountTarget = kMaxUnusedBufferCount - 16;
 
     static constexpr nsecs_t kEvictGranularityNs = 1000000000; // 1 sec
     static constexpr nsecs_t kEvictDurationNs = 5000000000; // 5 secs
@@ -724,9 +726,11 @@
 }
 
 void Accessor::Impl::BufferPool::cleanUp(bool clearCache) {
-    if (clearCache || mTimestampUs > mLastCleanUpUs + kCleanUpDurationUs) {
+    if (clearCache || mTimestampUs > mLastCleanUpUs + kCleanUpDurationUs ||
+            mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
         mLastCleanUpUs = mTimestampUs;
-        if (mTimestampUs > mLastLogUs + kLogDurationUs) {
+        if (mTimestampUs > mLastLogUs + kLogDurationUs ||
+                mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
             mLastLogUs = mTimestampUs;
             ALOGD("bufferpool2 %p : %zu(%zu size) total buffers - "
                   "%zu(%zu size) used buffers - %zu/%zu (recycle/alloc) - "
@@ -737,8 +741,9 @@
                   mStats.mTotalFetches, mStats.mTotalTransfers);
         }
         for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
-            if (!clearCache && (mStats.mSizeCached < kMinAllocBytesForEviction
-                    || mBuffers.size() < kMinBufferCountForEviction)) {
+            if (!clearCache && mStats.buffersNotInUse() <= kUnusedBufferCountTarget &&
+                    (mStats.mSizeCached < kMinAllocBytesForEviction ||
+                     mBuffers.size() < kMinBufferCountForEviction)) {
                 break;
             }
             auto it = mBuffers.find(*freeIt);
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index cd1b4d0..3d39941 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -193,6 +193,12 @@
                 : mSizeCached(0), mBuffersCached(0), mSizeInUse(0), mBuffersInUse(0),
                   mTotalAllocations(0), mTotalRecycles(0), mTotalTransfers(0), mTotalFetches(0) {}
 
+            /// # of currently unused buffers
+            size_t buffersNotInUse() const {
+                ALOG_ASSERT(mBuffersCached >= mBuffersInUse);
+                return mBuffersCached - mBuffersInUse;
+            }
+
             /// A new buffer is allocated on an allocation request.
             void onBufferAllocated(size_t allocSize) {
                 mSizeCached += allocSize;
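The bufferpool changes above add a count-based trigger on top of the existing size and age heuristics: once more than kMaxUnusedBufferCount (64) unused buffers accumulate, cleanUp() keeps evicting free buffers until only kUnusedBufferCountTarget (48) remain. Below is a minimal standalone sketch of that high/low watermark (hysteresis) pattern; the FakeBuffer type and evictIfNeeded() helper are illustrative only and do not correspond to the actual AccessorImpl types.

#include <cstddef>
#include <cstdio>
#include <list>

// Illustrative constants mirroring the patch: start evicting once 64 unused
// buffers accumulate, and stop once only 48 remain (a hysteresis gap of 16).
constexpr size_t kMaxUnusedBufferCount = 64;
constexpr size_t kUnusedBufferCountTarget = kMaxUnusedBufferCount - 16;

struct FakeBuffer { size_t sizeBytes; };

// Evict only when the free count exceeds the high watermark, and drain down to
// the low watermark so eviction is not retriggered by every allocation that
// hovers near the threshold.
void evictIfNeeded(std::list<FakeBuffer> &freeBuffers) {
    if (freeBuffers.size() <= kMaxUnusedBufferCount) {
        return;  // below the high watermark: nothing to do
    }
    while (freeBuffers.size() > kUnusedBufferCountTarget) {
        freeBuffers.pop_front();  // drop the oldest free buffer first
    }
}

int main() {
    std::list<FakeBuffer> freeBuffers(70, FakeBuffer{4096});
    evictIfNeeded(freeBuffers);
    std::printf("free buffers after eviction: %zu\n", freeBuffers.size());  // prints 48
    return 0;
}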
diff --git a/media/bufferpool/2.0/BufferPoolClient.cpp b/media/bufferpool/2.0/BufferPoolClient.cpp
index 342fef6..9308b81 100644
--- a/media/bufferpool/2.0/BufferPoolClient.cpp
+++ b/media/bufferpool/2.0/BufferPoolClient.cpp
@@ -32,6 +32,8 @@
 static constexpr int64_t kReceiveTimeoutUs = 1000000; // 1 sec
 static constexpr int kPostMaxRetry = 3;
 static constexpr int kCacheTtlUs = 1000000; // TODO: tune
+static constexpr size_t kMaxCachedBufferCount = 64;
+static constexpr size_t kCachedBufferCountTarget = kMaxCachedBufferCount - 16;
 
 class BufferPoolClient::Impl
         : public std::enable_shared_from_this<BufferPoolClient::Impl> {
@@ -136,6 +138,10 @@
             --mActive;
             mLastChangeUs = getTimestampNow();
         }
+
+        int cachedBufferCount() const {
+            return mBuffers.size() - mActive;
+        }
     } mCache;
 
     // FMQ - release notifier
@@ -668,10 +674,12 @@
 // should have mCache.mLock
 void BufferPoolClient::Impl::evictCaches(bool clearCache) {
     int64_t now = getTimestampNow();
-    if (now >= mLastEvictCacheUs + kCacheTtlUs || clearCache) {
+    if (now >= mLastEvictCacheUs + kCacheTtlUs ||
+            clearCache || mCache.cachedBufferCount() > kMaxCachedBufferCount) {
         size_t evicted = 0;
         for (auto it = mCache.mBuffers.begin(); it != mCache.mBuffers.end();) {
-            if (!it->second->hasCache() && (it->second->expire() || clearCache)) {
+            if (!it->second->hasCache() && (it->second->expire() ||
+                        clearCache || mCache.cachedBufferCount() > kCachedBufferCountTarget)) {
                 it = mCache.mBuffers.erase(it);
                 ++evicted;
             } else {
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index 82c061a..b1cf388 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -30,6 +30,7 @@
 
 namespace android {
 constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
+constexpr size_t kMaxDimension = 1920;
 constexpr char COMPONENT_NAME[] = "c2.android.mpeg2.decoder";
 
 class C2SoftMpeg2Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -64,8 +65,8 @@
                 DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
                 .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
                 .withFields({
-                    C2F(mSize, width).inRange(16, 1920, 4),
-                    C2F(mSize, height).inRange(16, 1088, 4),
+                    C2F(mSize, width).inRange(16, kMaxDimension, 2),
+                    C2F(mSize, height).inRange(16, kMaxDimension, 2),
                 })
                 .withSetter(SizeSetter)
                 .build());
@@ -91,8 +92,8 @@
                 DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
                 .withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 320, 240))
                 .withFields({
-                    C2F(mSize, width).inRange(2, 1920, 2),
-                    C2F(mSize, height).inRange(2, 1088, 2),
+                    C2F(mSize, width).inRange(2, kMaxDimension, 2),
+                    C2F(mSize, height).inRange(2, kMaxDimension, 2),
                 })
                 .withSetter(MaxPictureSizeSetter, mSize)
                 .build());
@@ -204,8 +205,8 @@
                                     const C2P<C2StreamPictureSizeInfo::output> &size) {
         (void)mayBlock;
         // TODO: get max width/height from the size's field helpers vs. hardcoding
-        me.set().width = c2_min(c2_max(me.v.width, size.v.width), 1920u);
-        me.set().height = c2_min(c2_max(me.v.height, size.v.height), 1088u);
+        me.set().width = c2_min(c2_max(me.v.width, size.v.width), kMaxDimension);
+        me.set().height = c2_min(c2_max(me.v.height, size.v.height), kMaxDimension);
         return C2R::Ok();
     }
 
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index a7cc037..ddd312f 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -35,8 +35,10 @@
 namespace android {
 constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
 #ifdef MPEG4
+constexpr size_t kMaxDimension = 1920;
 constexpr char COMPONENT_NAME[] = "c2.android.mpeg4.decoder";
 #else
+constexpr size_t kMaxDimension = 352;
 constexpr char COMPONENT_NAME[] = "c2.android.h263.decoder";
 #endif
 
@@ -75,13 +77,8 @@
                 DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
                 .withDefault(new C2StreamPictureSizeInfo::output(0u, 176, 144))
                 .withFields({
-#ifdef MPEG4
-                    C2F(mSize, width).inRange(2, 1920, 2),
-                    C2F(mSize, height).inRange(2, 1088, 2),
-#else
-                    C2F(mSize, width).inRange(2, 352, 2),
-                    C2F(mSize, height).inRange(2, 288, 2),
-#endif
+                    C2F(mSize, width).inRange(2, kMaxDimension, 2),
+                    C2F(mSize, height).inRange(2, kMaxDimension, 2),
                 })
                 .withSetter(SizeSetter)
                 .build());
@@ -130,19 +127,10 @@
 
         addParameter(
                 DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
-#ifdef MPEG4
-                .withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 1920, 1088))
-#else
                 .withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 352, 288))
-#endif
                 .withFields({
-#ifdef MPEG4
-                    C2F(mSize, width).inRange(2, 1920, 2),
-                    C2F(mSize, height).inRange(2, 1088, 2),
-#else
-                    C2F(mSize, width).inRange(2, 352, 2),
-                    C2F(mSize, height).inRange(2, 288, 2),
-#endif
+                    C2F(mSize, width).inRange(2, kMaxDimension, 2),
+                    C2F(mSize, height).inRange(2, kMaxDimension, 2),
                 })
                 .withSetter(MaxPictureSizeSetter, mSize)
                 .build());
@@ -200,13 +188,8 @@
                                     const C2P<C2StreamPictureSizeInfo::output> &size) {
         (void)mayBlock;
         // TODO: get max width/height from the size's field helpers vs. hardcoding
-#ifdef MPEG4
-        me.set().width = c2_min(c2_max(me.v.width, size.v.width), 1920u);
-        me.set().height = c2_min(c2_max(me.v.height, size.v.height), 1088u);
-#else
-        me.set().width = c2_min(c2_max(me.v.width, size.v.width), 352u);
-        me.set().height = c2_min(c2_max(me.v.height, size.v.height), 288u);
-#endif
+        me.set().width = c2_min(c2_max(me.v.width, size.v.width), kMaxDimension);
+        me.set().height = c2_min(c2_max(me.v.height, size.v.height), kMaxDimension);
         return C2R::Ok();
     }
 
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 38f7389..752140a 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -151,6 +151,7 @@
 
     /* protected content */
     kParamIndexSecureMode,
+    kParamIndexEncryptedBuffer, // info-buffer, used with SM_READ_PROTECTED_WITH_ENCRYPTED
 
     // deprecated
     kParamIndexDelayRequest = kParamIndexDelay | C2Param::CoreIndex::IS_REQUEST_FLAG,
@@ -221,6 +222,7 @@
     kParamIndexDrcEffectType, // drc, enum
     kParamIndexDrcOutputLoudness, // drc, float (dBFS)
     kParamIndexDrcAlbumMode, // drc, enum
+    kParamIndexAudioFrameSize, // int
 
     /* ============================== platform-defined parameters ============================== */
 
@@ -1144,6 +1146,8 @@
 C2ENUM(C2Config::secure_mode_t, uint32_t,
     SM_UNPROTECTED,    ///< no content protection
     SM_READ_PROTECTED, ///< input and output buffers shall be protected from reading
+    /// both read-protected and readable encrypted buffers are used
+    SM_READ_PROTECTED_WITH_ENCRYPTED,
 )
 
 typedef C2GlobalParam<C2Tuning, C2SimpleValueStruct<C2Config::secure_mode_t>, kParamIndexSecureMode>
@@ -1969,9 +1973,20 @@
 /**
  * DRC output loudness in dBFS. Retrieved during decoding
  */
- typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexDrcOutputLoudness>
+typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexDrcOutputLoudness>
         C2StreamDrcOutputLoudnessTuning;
- constexpr char C2_PARAMKEY_DRC_OUTPUT_LOUDNESS[] = "output.drc.output-loudness";
+constexpr char C2_PARAMKEY_DRC_OUTPUT_LOUDNESS[] = "output.drc.output-loudness";
+
+/**
+ * Audio frame size in samples.
+ *
+ * Audio encoders can expose this parameter to signal the desired audio frame
+ * size that corresponds to a single coded access unit.
+ * The default value is 0, meaning that the encoder accepts input buffers of any size.
+ */
+typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexAudioFrameSize>
+        C2StreamAudioFrameSizeInfo;
+constexpr char C2_PARAMKEY_AUDIO_FRAME_SIZE[] = "raw.audio-frame-size";
 
 /* --------------------------------------- AAC components --------------------------------------- */
 
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 1f0c856..72f7c43 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -895,13 +895,12 @@
         BufferPoolSender* bufferPoolSender,
         std::list<BaseBlock>* baseBlocks,
         std::map<const void*, uint32_t>* baseBlockIndices) {
-    // TODO: C2InfoBuffer is not implemented.
-    (void)d;
-    (void)s;
-    (void)bufferPoolSender;
-    (void)baseBlocks;
-    (void)baseBlockIndices;
-    LOG(INFO) << "InfoBuffer not implemented.";
+    d->index = static_cast<ParamIndex>(s.index());
+    Buffer& dBuffer = d->buffer;
+    if (!objcpy(&dBuffer, s.data(), bufferPoolSender, baseBlocks, baseBlockIndices)) {
+        LOG(ERROR) << "Invalid C2InfoBuffer::data";
+        return false;
+    }
     return true;
 }
 
@@ -1336,6 +1335,68 @@
     return true;
 }
 
+// InfoBuffer -> C2InfoBuffer
+bool objcpy(std::vector<C2InfoBuffer> *d, const InfoBuffer& s,
+        const std::vector<C2BaseBlock>& baseBlocks) {
+
+    // Currently, a non-null C2InfoBuffer must contain exactly 1 block.
+    if (s.buffer.blocks.size() == 0) {
+        return true;
+    } else if (s.buffer.blocks.size() != 1) {
+        LOG(ERROR) << "Invalid InfoBuffer::Buffer "
+                      "Currently, a C2InfoBuffer must contain exactly 1 block.";
+        return false;
+    }
+
+    const Block &sBlock = s.buffer.blocks[0];
+    if (sBlock.index >= baseBlocks.size()) {
+        LOG(ERROR) << "Invalid InfoBuffer::Buffer::blocks[0].index: "
+                      "Array index out of range.";
+        return false;
+    }
+    const C2BaseBlock &baseBlock = baseBlocks[sBlock.index];
+
+    // Parse meta.
+    std::vector<C2Param*> sBlockMeta;
+    if (!parseParamsBlob(&sBlockMeta, sBlock.meta)) {
+        LOG(ERROR) << "Invalid InfoBuffer::Buffer::blocks[0].meta.";
+        return false;
+    }
+
+    // Copy fence.
+    C2Fence dFence;
+    if (!objcpy(&dFence, sBlock.fence)) {
+        LOG(ERROR) << "Invalid InfoBuffer::Buffer::blocks[0].fence.";
+        return false;
+    }
+
+    // Construct a block.
+    switch (baseBlock.type) {
+    case C2BaseBlock::LINEAR:
+        if (sBlockMeta.size() == 1 && sBlockMeta[0] != nullptr &&
+            sBlockMeta[0]->size() == sizeof(C2Hidl_RangeInfo)) {
+            C2Hidl_RangeInfo *rangeInfo =
+                    reinterpret_cast<C2Hidl_RangeInfo*>(sBlockMeta[0]);
+            d->emplace_back(C2InfoBuffer::CreateLinearBuffer(
+                    s.index,
+                    baseBlock.linear->share(
+                            rangeInfo->offset, rangeInfo->length, dFence)));
+            return true;
+        }
+        LOG(ERROR) << "Invalid Meta for C2BaseBlock::Linear InfoBuffer.";
+        break;
+    case C2BaseBlock::GRAPHIC:
+        // Graphic info buffers are not used at the moment.
+        LOG(ERROR) << "Unused C2BaseBlock::type for InfoBuffer.";
+        break;
+    default:
+        LOG(ERROR) << "Invalid C2BaseBlock::type for InfoBuffer.";
+        break;
+    }
+
+    return false;
+}
+
 // FrameData -> C2FrameData
 bool objcpy(C2FrameData* d, const FrameData& s,
         const std::vector<C2BaseBlock>& baseBlocks) {
@@ -1370,8 +1431,18 @@
         }
     }
 
-    // TODO: Implement this once C2InfoBuffer has constructors.
     d->infoBuffers.clear();
+    if (s.infoBuffers.size() == 0) {
+        // InfoBuffer is optional
+        return true;
+    }
+    d->infoBuffers.reserve(s.infoBuffers.size());
+    for (const InfoBuffer &sInfoBuffer: s.infoBuffers) {
+        if (!objcpy(&(d->infoBuffers), sInfoBuffer, baseBlocks)) {
+            LOG(ERROR) << "Invalid Framedata::infoBuffers.";
+            return false;
+        }
+    }
     return true;
 }
 
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 94034b5..c3cfcce 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -11,6 +11,7 @@
         "CCodecConfig.cpp",
         "Codec2Buffer.cpp",
         "Codec2InfoBuilder.cpp",
+        "FrameReassembler.cpp",
         "PipelineWatcher.cpp",
         "ReflectedParamUpdater.cpp",
     ],
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 05c1182..ba1d178 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -143,7 +143,8 @@
       mFrameIndex(0u),
       mFirstValidFrameIndex(0u),
       mMetaMode(MODE_NONE),
-      mInputMetEos(false) {
+      mInputMetEos(false),
+      mSendEncryptedInfoBuffer(false) {
     mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
     {
         Mutexed<Input>::Locked input(mInput);
@@ -188,7 +189,10 @@
     return mInputSurface->signalEndOfInputStream();
 }
 
-status_t CCodecBufferChannel::queueInputBufferInternal(sp<MediaCodecBuffer> buffer) {
+status_t CCodecBufferChannel::queueInputBufferInternal(
+        sp<MediaCodecBuffer> buffer,
+        std::shared_ptr<C2LinearBlock> encryptedBlock,
+        size_t blockSize) {
     int64_t timeUs;
     CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
@@ -209,6 +213,7 @@
         flags |= C2FrameData::FLAG_CODEC_CONFIG;
     }
     ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
+    std::list<std::unique_ptr<C2Work>> items;
     std::unique_ptr<C2Work> work(new C2Work);
     work->input.ordinal.timestamp = timeUs;
     work->input.ordinal.frameIndex = mFrameIndex++;
@@ -218,9 +223,8 @@
     work->input.ordinal.customOrdinal = timeUs;
     work->input.buffers.clear();
 
-    uint64_t queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
-    std::vector<std::shared_ptr<C2Buffer>> queuedBuffers;
     sp<Codec2Buffer> copy;
+    bool usesFrameReassembler = false;
 
     if (buffer->size() > 0u) {
         Mutexed<Input>::Locked input(mInput);
@@ -245,30 +249,38 @@
                       "buffer starvation on component.", mName);
             }
         }
-        work->input.buffers.push_back(c2buffer);
-        queuedBuffers.push_back(c2buffer);
+        if (input->frameReassembler) {
+            usesFrameReassembler = true;
+            input->frameReassembler.process(buffer, &items);
+        } else {
+            work->input.buffers.push_back(c2buffer);
+            if (encryptedBlock) {
+                work->input.infoBuffers.emplace_back(C2InfoBuffer::CreateLinearBuffer(
+                        kParamIndexEncryptedBuffer,
+                        encryptedBlock->share(0, blockSize, C2Fence())));
+            }
+        }
     } else if (eos) {
         flags |= C2FrameData::FLAG_END_OF_STREAM;
     }
-    work->input.flags = (C2FrameData::flags_t)flags;
-    // TODO: fill info's
+    if (usesFrameReassembler) {
+        if (!items.empty()) {
+            items.front()->input.configUpdate = std::move(mParamsToBeSet);
+            mFrameIndex = (items.back()->input.ordinal.frameIndex + 1).peek();
+        }
+    } else {
+        work->input.flags = (C2FrameData::flags_t)flags;
+        // TODO: fill info's
 
-    work->input.configUpdate = std::move(mParamsToBeSet);
-    work->worklets.clear();
-    work->worklets.emplace_back(new C2Worklet);
+        work->input.configUpdate = std::move(mParamsToBeSet);
+        work->worklets.clear();
+        work->worklets.emplace_back(new C2Worklet);
 
-    std::list<std::unique_ptr<C2Work>> items;
-    items.push_back(std::move(work));
-    mPipelineWatcher.lock()->onWorkQueued(
-            queuedFrameIndex,
-            std::move(queuedBuffers),
-            PipelineWatcher::Clock::now());
-    c2_status_t err = mComponent->queue(&items);
-    if (err != C2_OK) {
-        mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
+        items.push_back(std::move(work));
+
+        eos = eos && buffer->size() > 0u;
     }
-
-    if (err == C2_OK && eos && buffer->size() > 0u) {
+    if (eos) {
         work.reset(new C2Work);
         work->input.ordinal.timestamp = timeUs;
         work->input.ordinal.frameIndex = mFrameIndex++;
@@ -277,23 +289,28 @@
         work->input.buffers.clear();
         work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
         work->worklets.emplace_back(new C2Worklet);
-
-        queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
-        queuedBuffers.clear();
-
-        items.clear();
         items.push_back(std::move(work));
-
-        mPipelineWatcher.lock()->onWorkQueued(
-                queuedFrameIndex,
-                std::move(queuedBuffers),
-                PipelineWatcher::Clock::now());
-        err = mComponent->queue(&items);
-        if (err != C2_OK) {
-            mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
-        }
     }
-    if (err == C2_OK) {
+    c2_status_t err = C2_OK;
+    if (!items.empty()) {
+        {
+            Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+            PipelineWatcher::Clock::time_point now = PipelineWatcher::Clock::now();
+            for (const std::unique_ptr<C2Work> &work : items) {
+                watcher->onWorkQueued(
+                        work->input.ordinal.frameIndex.peeku(),
+                        std::vector(work->input.buffers),
+                        now);
+            }
+        }
+        err = mComponent->queue(&items);
+    }
+    if (err != C2_OK) {
+        Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+        for (const std::unique_ptr<C2Work> &work : items) {
+            watcher->onWorkDone(work->input.ordinal.frameIndex.peeku());
+        }
+    } else {
         Mutexed<Input>::Locked input(mInput);
         bool released = false;
         if (buffer) {
@@ -514,6 +531,40 @@
     }
     sp<EncryptedLinearBlockBuffer> encryptedBuffer((EncryptedLinearBlockBuffer *)buffer.get());
 
+    std::shared_ptr<C2LinearBlock> block;
+    size_t allocSize = buffer->size();
+    size_t bufferSize = 0;
+    c2_status_t blockRes = C2_OK;
+    bool copied = false;
+    if (mSendEncryptedInfoBuffer) {
+        static const C2MemoryUsage kDefaultReadWriteUsage{
+            C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+        constexpr int kAllocGranule0 = 1024 * 64;
+        constexpr int kAllocGranule1 = 1024 * 1024;
+        std::shared_ptr<C2BlockPool> pool = mBlockPools.lock()->inputPool;
+        // round up encrypted sizes to limit fragmentation and encourage buffer reuse
+        if (allocSize <= kAllocGranule1) {
+            bufferSize = align(allocSize, kAllocGranule0);
+        } else {
+            bufferSize = align(allocSize, kAllocGranule1);
+        }
+        blockRes = pool->fetchLinearBlock(
+                bufferSize, kDefaultReadWriteUsage, &block);
+
+        if (blockRes == C2_OK) {
+            C2WriteView view = block->map().get();
+            if (view.error() == C2_OK && view.size() == bufferSize) {
+                copied = true;
+                // TODO: only copy clear sections
+                memcpy(view.data(), buffer->data(), allocSize);
+            }
+        }
+    }
+
+    if (!copied) {
+        block.reset();
+    }
+
     ssize_t result = -1;
     ssize_t codecDataOffset = 0;
     if (numSubSamples == 1
@@ -605,7 +656,8 @@
     }
 
     buffer->setRange(codecDataOffset, result - codecDataOffset);
-    return queueInputBufferInternal(buffer);
+
+    return queueInputBufferInternal(buffer, block, bufferSize);
 }
 
 void CCodecBufferChannel::feedInputBufferIfAvailable() {
@@ -882,27 +934,31 @@
         bool buffersBoundToCodec) {
     C2StreamBufferTypeSetting::input iStreamFormat(0u);
     C2StreamBufferTypeSetting::output oStreamFormat(0u);
+    C2ComponentKindSetting kind;
     C2PortReorderBufferDepthTuning::output reorderDepth;
     C2PortReorderKeySetting::output reorderKey;
     C2PortActualDelayTuning::input inputDelay(0);
     C2PortActualDelayTuning::output outputDelay(0);
     C2ActualPipelineDelayTuning pipelineDelay(0);
+    C2SecureModeTuning secureMode(C2Config::SM_UNPROTECTED);
 
     c2_status_t err = mComponent->query(
             {
                 &iStreamFormat,
                 &oStreamFormat,
+                &kind,
                 &reorderDepth,
                 &reorderKey,
                 &inputDelay,
                 &pipelineDelay,
                 &outputDelay,
+                &secureMode,
             },
             {},
             C2_DONT_BLOCK,
             nullptr);
     if (err == C2_BAD_INDEX) {
-        if (!iStreamFormat || !oStreamFormat) {
+        if (!iStreamFormat || !oStreamFormat || !kind) {
             return UNKNOWN_ERROR;
         }
     } else if (err != C2_OK) {
@@ -919,18 +975,26 @@
     // TODO: get this from input format
     bool secure = mComponent->getName().find(".secure") != std::string::npos;
 
+    // secure mode is a static parameter (shall not change in the executing state)
+    mSendEncryptedInfoBuffer = secureMode.value == C2Config::SM_READ_PROTECTED_WITH_ENCRYPTED;
+
     std::shared_ptr<C2AllocatorStore> allocatorStore = GetCodec2PlatformAllocatorStore();
     int poolMask = GetCodec2PoolMask();
     C2PlatformAllocatorStore::id_t preferredLinearId = GetPreferredLinearAllocatorId(poolMask);
 
     if (inputFormat != nullptr) {
         bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
+        bool audioEncoder = !graphic && (kind.value == C2Component::KIND_ENCODER);
         C2Config::api_feature_t apiFeatures = C2Config::api_feature_t(
                 API_REFLECTION |
                 API_VALUES |
                 API_CURRENT_VALUES |
                 API_DEPENDENCY |
                 API_SAME_INPUT_BUFFER);
+        C2StreamAudioFrameSizeInfo::input encoderFrameSize(0u);
+        C2StreamSampleRateInfo::input sampleRate(0u);
+        C2StreamChannelCountInfo::input channelCount(0u);
+        C2StreamPcmEncodingInfo::input pcmEncoding(0u);
         std::shared_ptr<C2BlockPool> pool;
         {
             Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -943,7 +1007,19 @@
             // from component, create the input block pool with given ID. Otherwise, use default IDs.
             std::vector<std::unique_ptr<C2Param>> params;
             C2ApiFeaturesSetting featuresSetting{apiFeatures};
-            err = mComponent->query({ &featuresSetting },
+            std::vector<C2Param *> stackParams({&featuresSetting});
+            if (audioEncoder) {
+                stackParams.push_back(&encoderFrameSize);
+                stackParams.push_back(&sampleRate);
+                stackParams.push_back(&channelCount);
+                stackParams.push_back(&pcmEncoding);
+            } else {
+                encoderFrameSize.invalidate();
+                sampleRate.invalidate();
+                channelCount.invalidate();
+                pcmEncoding.invalidate();
+            }
+            err = mComponent->query(stackParams,
                                     { C2PortAllocatorsTuning::input::PARAM_TYPE },
                                     C2_DONT_BLOCK,
                                     &params);
@@ -1001,10 +1077,21 @@
         input->numSlots = numInputSlots;
         input->extraBuffers.flush();
         input->numExtraSlots = 0u;
+        if (audioEncoder && encoderFrameSize && sampleRate && channelCount) {
+            input->frameReassembler.init(
+                    pool,
+                    {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+                    encoderFrameSize.value,
+                    sampleRate.value,
+                    channelCount.value,
+                    pcmEncoding ? pcmEncoding.value : C2Config::PCM_16);
+        }
         bool conforming = (apiFeatures & API_SAME_INPUT_BUFFER);
         // For encrypted content, framework decrypts source buffer (ashmem) into
         // C2Buffers. Thus non-conforming codecs can process these.
-        if (!buffersBoundToCodec && (hasCryptoOrDescrambler() || conforming)) {
+        if (!buffersBoundToCodec
+                && !input->frameReassembler
+                && (hasCryptoOrDescrambler() || conforming)) {
             input->buffers.reset(new SlotInputBuffers(mName));
         } else if (graphic) {
             if (mInputSurface) {
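The encrypted-buffer path added to CCodecBufferChannel above rounds allocations up to a 64 KiB granule for requests of 1 MiB or less and to a 1 MiB granule for larger requests, to limit fragmentation and encourage reuse of the shared input blocks. A minimal sketch of that arithmetic, with an assumed power-of-two alignUp() helper standing in for the framework's align():

#include <cstddef>
#include <cstdio>

// Assumed power-of-two round-up helper (stands in for the align() used above).
static size_t alignUp(size_t value, size_t granule) {
    return (value + granule - 1) & ~(granule - 1);
}

// Mirrors the granule selection in the encrypted-buffer path: requests of
// 1 MiB or less round up to 64 KiB, larger requests round up to 1 MiB.
static size_t roundedBufferSize(size_t allocSize) {
    constexpr size_t kAllocGranule0 = 1024 * 64;    // 64 KiB
    constexpr size_t kAllocGranule1 = 1024 * 1024;  // 1 MiB
    return allocSize <= kAllocGranule1
            ? alignUp(allocSize, kAllocGranule0)
            : alignUp(allocSize, kAllocGranule1);
}

int main() {
    std::printf("%zu -> %zu\n", (size_t)100000, roundedBufferSize(100000));    // 131072
    std::printf("%zu -> %zu\n", (size_t)3000000, roundedBufferSize(3000000));  // 3145728
    return 0;
}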
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index e2c9aaa..b9e8d39 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -31,6 +31,7 @@
 #include <media/stagefright/CodecBase.h>
 
 #include "CCodecBuffers.h"
+#include "FrameReassembler.h"
 #include "InputSurfaceWrapper.h"
 #include "PipelineWatcher.h"
 
@@ -238,7 +239,9 @@
 
     void feedInputBufferIfAvailable();
     void feedInputBufferIfAvailableInternal();
-    status_t queueInputBufferInternal(sp<MediaCodecBuffer> buffer);
+    status_t queueInputBufferInternal(sp<MediaCodecBuffer> buffer,
+                                      std::shared_ptr<C2LinearBlock> encryptedBlock = nullptr,
+                                      size_t blockSize = 0);
     bool handleWork(
             std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
             const C2StreamInitDataInfo::output *initData);
@@ -269,6 +272,8 @@
         size_t numExtraSlots;
         uint32_t inputDelay;
         uint32_t pipelineDelay;
+
+        FrameReassembler frameReassembler;
     };
     Mutexed<Input> mInput;
     struct Output {
@@ -315,6 +320,7 @@
     inline bool hasCryptoOrDescrambler() {
         return mCrypto != nullptr || mDescrambler != nullptr;
     }
+    std::atomic_bool mSendEncryptedInfoBuffer;
 };
 
 // Conversion of a c2_status_t value to a status_t value may depend on the
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
new file mode 100644
index 0000000..f8e6937
--- /dev/null
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameReassembler"
+
+#include <log/log.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "FrameReassembler.h"
+
+namespace android {
+
+static constexpr uint64_t kToleranceUs = 1000;  // 1ms
+
+FrameReassembler::FrameReassembler()
+    : mUsage{0, 0},
+      mSampleRate(0u),
+      mChannelCount(0u),
+      mEncoding(C2Config::PCM_16),
+      mCurrentOrdinal({0, 0, 0}) {
+}
+
+void FrameReassembler::init(
+        const std::shared_ptr<C2BlockPool> &pool,
+        C2MemoryUsage usage,
+        uint32_t frameSize,
+        uint32_t sampleRate,
+        uint32_t channelCount,
+        C2Config::pcm_encoding_t encoding) {
+    mBlockPool = pool;
+    mUsage = usage;
+    mFrameSize = frameSize;
+    mSampleRate = sampleRate;
+    mChannelCount = channelCount;
+    mEncoding = encoding;
+}
+
+void FrameReassembler::updateFrameSize(uint32_t frameSize) {
+    finishCurrentBlock(&mPendingWork);
+    mFrameSize = frameSize;
+}
+
+void FrameReassembler::updateSampleRate(uint32_t sampleRate) {
+    finishCurrentBlock(&mPendingWork);
+    mSampleRate = sampleRate;
+}
+
+void FrameReassembler::updateChannelCount(uint32_t channelCount) {
+    finishCurrentBlock(&mPendingWork);
+    mChannelCount = channelCount;
+}
+
+void FrameReassembler::updatePcmEncoding(C2Config::pcm_encoding_t encoding) {
+    finishCurrentBlock(&mPendingWork);
+    mEncoding = encoding;
+}
+
+void FrameReassembler::reset() {
+    flush();
+    mCurrentOrdinal = {0, 0, 0};
+    mBlockPool.reset();
+    mFrameSize.reset();
+    mSampleRate = 0u;
+    mChannelCount = 0u;
+    mEncoding = C2Config::PCM_16;
+}
+
+FrameReassembler::operator bool() const {
+    return mFrameSize.has_value();
+}
+
+c2_status_t FrameReassembler::process(
+        const sp<MediaCodecBuffer> &buffer,
+        std::list<std::unique_ptr<C2Work>> *items) {
+    int64_t timeUs;
+    if (buffer->size() == 0u
+            || !buffer->meta()->findInt64("timeUs", &timeUs)) {
+        return C2_BAD_VALUE;
+    }
+
+    items->splice(items->end(), mPendingWork);
+
+    // Fill mCurrentBlock
+    if (mCurrentBlock) {
+        // First check the timestamp
+        c2_cntr64_t endTimestampUs = mCurrentOrdinal.timestamp;
+        endTimestampUs += bytesToSamples(mWriteView->size()) * 1000000 / mSampleRate;
+        if (timeUs < endTimestampUs.peek()) {
+            uint64_t diffUs = (endTimestampUs - timeUs).peeku();
+            if (diffUs > kToleranceUs) {
+                // The timestamp is going back in time by a large amount.
+                // TODO: b/145702136
+                ALOGW("timestamp going back in time! from %lld to %lld",
+                        endTimestampUs.peekll(), (long long)timeUs);
+            }
+        } else {  // timeUs >= endTimestampUs.peek()
+            uint64_t diffUs = (timeUs - endTimestampUs).peeku();
+            if (diffUs > kToleranceUs) {
+                // The timestamp is going forward; add silence as necessary.
+                size_t gapSamples = usToSamples(diffUs);
+                size_t remainingSamples =
+                    (mWriteView->capacity() - mWriteView->size())
+                    / mChannelCount / bytesPerSample();
+                if (gapSamples < remainingSamples) {
+                    size_t gapBytes = gapSamples * mChannelCount * bytesPerSample();
+                    memset(mWriteView->base() + mWriteView->size(), 0u, gapBytes);
+                    mWriteView->setSize(mWriteView->size() + gapBytes);
+                } else {
+                    finishCurrentBlock(items);
+                }
+            }
+        }
+    }
+
+    if (mCurrentBlock) {
+        // Append the data at the end of the current block
+        size_t copySize = std::min(
+                buffer->size(),
+                size_t(mWriteView->capacity() - mWriteView->size()));
+        memcpy(mWriteView->base() + mWriteView->size(), buffer->data(), copySize);
+        buffer->setRange(buffer->offset() + copySize, buffer->size() - copySize);
+        mWriteView->setSize(mWriteView->size() + copySize);
+        if (mWriteView->size() == mWriteView->capacity()) {
+            finishCurrentBlock(items);
+        }
+        timeUs += bytesToSamples(copySize) * 1000000 / mSampleRate;
+    }
+
+    if (buffer->size() > 0) {
+        mCurrentOrdinal.timestamp = timeUs;
+    }
+
+    size_t frameSizeBytes = mFrameSize.value() * mChannelCount * bytesPerSample();
+    while (buffer->size() > 0) {
+        LOG_ALWAYS_FATAL_IF(
+                mCurrentBlock,
+                "There's remaining data but the pending block is not filled & finished");
+        std::unique_ptr<C2Work> work(new C2Work);
+        c2_status_t err = mBlockPool->fetchLinearBlock(frameSizeBytes, mUsage, &mCurrentBlock);
+        if (err != C2_OK) {
+            return err;
+        }
+        size_t copySize = std::min(buffer->size(), frameSizeBytes);
+        mWriteView = mCurrentBlock->map().get();
+        if (mWriteView->error() != C2_OK) {
+            return mWriteView->error();
+        }
+        ALOGV("buffer={offset=%zu size=%zu) copySize=%zu",
+                buffer->offset(), buffer->size(), copySize);
+        memcpy(mWriteView->base(), buffer->data(), copySize);
+        mWriteView->setOffset(0u);
+        mWriteView->setSize(copySize);
+        buffer->setRange(buffer->offset() + copySize, buffer->size() - copySize);
+        if (copySize == frameSizeBytes) {
+            finishCurrentBlock(items);
+        }
+    }
+
+    int32_t eos = 0;
+    if (buffer->meta()->findInt32("eos", &eos) && eos) {
+        finishCurrentBlock(items);
+    }
+
+    return C2_OK;
+}
+
+void FrameReassembler::flush() {
+    mPendingWork.clear();
+    mWriteView.reset();
+    mCurrentBlock.reset();
+}
+
+uint64_t FrameReassembler::bytesToSamples(size_t numBytes) const {
+    return numBytes / mChannelCount / bytesPerSample();
+}
+
+size_t FrameReassembler::usToSamples(uint64_t us) const {
+    return (us * mChannelCount * mSampleRate / 1000000);
+}
+
+uint32_t FrameReassembler::bytesPerSample() const {
+    return (mEncoding == C2Config::PCM_8) ? 1
+         : (mEncoding == C2Config::PCM_16) ? 2
+         : (mEncoding == C2Config::PCM_FLOAT) ? 4 : 0;
+}
+
+void FrameReassembler::finishCurrentBlock(std::list<std::unique_ptr<C2Work>> *items) {
+    if (!mCurrentBlock) {
+        // No-op
+        return;
+    }
+    if (mWriteView->size() < mWriteView->capacity()) {
+        memset(mWriteView->base() + mWriteView->size(), 0u,
+                mWriteView->capacity() - mWriteView->size());
+        mWriteView->setSize(mWriteView->capacity());
+    }
+    std::unique_ptr<C2Work> work{std::make_unique<C2Work>()};
+    work->input.ordinal = mCurrentOrdinal;
+    work->input.buffers.push_back(C2Buffer::CreateLinearBuffer(
+            mCurrentBlock->share(0, mCurrentBlock->capacity(), C2Fence())));
+    work->worklets.clear();
+    work->worklets.emplace_back(new C2Worklet);
+    items->push_back(std::move(work));
+
+    ++mCurrentOrdinal.frameIndex;
+    mCurrentOrdinal.timestamp += mFrameSize.value() * 1000000 / mSampleRate;
+    mCurrentBlock.reset();
+    mWriteView.reset();
+}
+
+}  // namespace android
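As a quick sanity check of the timestamp bookkeeping in finishCurrentBlock() above, each finished block advances mCurrentOrdinal.timestamp by frameSize * 1000000 / mSampleRate microseconds; the snippet below simply evaluates that expression for the 1024-sample, 48 kHz configuration exercised by the tests that follow.

#include <cstdio>

int main() {
    // Per-frame timestamp advance used by FrameReassembler::finishCurrentBlock().
    constexpr unsigned frameSize = 1024;    // samples per output frame
    constexpr unsigned sampleRate = 48000;  // Hz
    std::printf("%u us per frame\n", frameSize * 1000000u / sampleRate);  // prints 21333
    return 0;
}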
diff --git a/media/codec2/sfplugin/FrameReassembler.h b/media/codec2/sfplugin/FrameReassembler.h
new file mode 100644
index 0000000..17ac06d
--- /dev/null
+++ b/media/codec2/sfplugin/FrameReassembler.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_REASSEMBLER_H_
+#define FRAME_REASSEMBLER_H_
+
+#include <set>
+#include <memory>
+
+#include <media/MediaCodecBuffer.h>
+
+#include <C2Config.h>
+#include <C2Work.h>
+
+namespace android {
+
+class FrameReassembler {
+public:
+    FrameReassembler();
+
+    void init(
+            const std::shared_ptr<C2BlockPool> &pool,
+            C2MemoryUsage usage,
+            uint32_t frameSize,
+            uint32_t sampleRate,
+            uint32_t channelCount,
+            C2Config::pcm_encoding_t encoding);
+    void updateFrameSize(uint32_t frameSize);
+    void updateSampleRate(uint32_t sampleRate);
+    void updateChannelCount(uint32_t channelCount);
+    void updatePcmEncoding(C2Config::pcm_encoding_t encoding);
+    void reset();
+    void flush();
+
+    explicit operator bool() const;
+
+    c2_status_t process(
+            const sp<MediaCodecBuffer> &buffer,
+            std::list<std::unique_ptr<C2Work>> *items);
+
+private:
+    std::shared_ptr<C2BlockPool> mBlockPool;
+    C2MemoryUsage mUsage;
+    std::optional<uint32_t> mFrameSize;
+    uint32_t mSampleRate;
+    uint32_t mChannelCount;
+    C2Config::pcm_encoding_t mEncoding;
+    std::list<std::unique_ptr<C2Work>> mPendingWork;
+    C2WorkOrdinalStruct mCurrentOrdinal;
+    std::shared_ptr<C2LinearBlock> mCurrentBlock;
+    std::optional<C2WriteView> mWriteView;
+
+    uint64_t bytesToSamples(size_t numBytes) const;
+    size_t usToSamples(uint64_t us) const;
+    uint32_t bytesPerSample() const;
+
+    void finishCurrentBlock(std::list<std::unique_ptr<C2Work>> *items);
+};
+
+}  // namespace android
+
+#endif  // FRAME_REASSEMBLER_H_
diff --git a/media/codec2/sfplugin/tests/Android.bp b/media/codec2/sfplugin/tests/Android.bp
index 8d1a9c3..51b99a4 100644
--- a/media/codec2/sfplugin/tests/Android.bp
+++ b/media/codec2/sfplugin/tests/Android.bp
@@ -4,6 +4,7 @@
     srcs: [
         "CCodecBuffers_test.cpp",
         "CCodecConfig_test.cpp",
+        "FrameReassembler_test.cpp",
         "ReflectedParamUpdater_test.cpp",
     ],
 
diff --git a/media/codec2/sfplugin/tests/FrameReassembler_test.cpp b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
new file mode 100644
index 0000000..6738ee7
--- /dev/null
+++ b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FrameReassembler.h"
+
+#include <gtest/gtest.h>
+
+#include <C2PlatformSupport.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+static size_t BytesPerSample(C2Config::pcm_encoding_t encoding) {
+    return encoding == PCM_8 ? 1
+         : encoding == PCM_16 ? 2
+         : encoding == PCM_FLOAT ? 4 : 0;
+}
+
+static uint64_t Diff(c2_cntr64_t a, c2_cntr64_t b) {
+    return std::abs((a - b).peek());
+}
+
+class FrameReassemblerTest : public ::testing::Test {
+public:
+    static const C2MemoryUsage kUsage;
+    static constexpr uint64_t kTimestampToleranceUs = 100;
+
+    FrameReassemblerTest() {
+        mInitStatus = GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, nullptr, &mPool);
+    }
+
+    status_t initStatus() const { return mInitStatus; }
+
+    void testPushSameSize(
+            size_t encoderFrameSize,
+            size_t sampleRate,
+            size_t channelCount,
+            C2Config::pcm_encoding_t encoding,
+            size_t inputFrameSizeInBytes,
+            size_t count,
+            size_t expectedOutputSize) {
+        FrameReassembler frameReassembler;
+        frameReassembler.init(
+                mPool,
+                kUsage,
+                encoderFrameSize,
+                sampleRate,
+                channelCount,
+                encoding);
+
+        ASSERT_TRUE(frameReassembler) << "FrameReassembler init failed";
+
+        size_t inputIndex = 0, outputIndex = 0;
+        size_t expectCount = 0;
+        for (size_t i = 0; i < count; ++i) {
+            sp<MediaCodecBuffer> buffer = new MediaCodecBuffer(
+                    new AMessage, new ABuffer(inputFrameSizeInBytes));
+            buffer->setRange(0, inputFrameSizeInBytes);
+            buffer->meta()->setInt64(
+                    "timeUs",
+                    inputIndex * 1000000 / sampleRate / channelCount / BytesPerSample(encoding));
+            if (i == count - 1) {
+                buffer->meta()->setInt32("eos", 1);
+            }
+            for (size_t j = 0; j < inputFrameSizeInBytes; ++j, ++inputIndex) {
+                buffer->base()[j] = (inputIndex & 0xFF);
+            }
+            std::list<std::unique_ptr<C2Work>> items;
+            ASSERT_EQ(C2_OK, frameReassembler.process(buffer, &items));
+            while (!items.empty()) {
+                std::unique_ptr<C2Work> work = std::move(*items.begin());
+                items.erase(items.begin());
+                // Verify timestamp
+                uint64_t expectedTimeUs =
+                    outputIndex * 1000000 / sampleRate / channelCount / BytesPerSample(encoding);
+                EXPECT_GE(
+                        kTimestampToleranceUs,
+                        Diff(expectedTimeUs, work->input.ordinal.timestamp))
+                    << "expected timestamp: " << expectedTimeUs
+                    << " actual timestamp: " << work->input.ordinal.timestamp.peeku()
+                    << " output index: " << outputIndex;
+
+                // Verify buffer
+                ASSERT_EQ(1u, work->input.buffers.size());
+                std::shared_ptr<C2Buffer> buffer = work->input.buffers.front();
+                ASSERT_EQ(C2BufferData::LINEAR, buffer->data().type());
+                ASSERT_EQ(1u, buffer->data().linearBlocks().size());
+                C2ReadView view = buffer->data().linearBlocks().front().map().get();
+                ASSERT_EQ(C2_OK, view.error());
+                ASSERT_EQ(encoderFrameSize * BytesPerSample(encoding), view.capacity());
+                for (size_t j = 0; j < view.capacity(); ++j, ++outputIndex) {
+                    ASSERT_TRUE(outputIndex < inputIndex
+                             || inputIndex == inputFrameSizeInBytes * count);
+                    uint8_t expected = outputIndex < inputIndex ? (outputIndex & 0xFF) : 0;
+                    if (expectCount < 10) {
+                        ++expectCount;
+                        EXPECT_EQ(expected, view.data()[j]) << "output index = " << outputIndex;
+                    }
+                }
+            }
+        }
+
+        ASSERT_EQ(inputFrameSizeInBytes * count, inputIndex);
+        size_t encoderFrameSizeInBytes =
+            encoderFrameSize * channelCount * BytesPerSample(encoding);
+        ASSERT_EQ(0, outputIndex % encoderFrameSizeInBytes)
+            << "output size must be multiple of frame size: output size = " << outputIndex
+            << " frame size = " << encoderFrameSizeInBytes;
+        ASSERT_EQ(expectedOutputSize, outputIndex)
+            << "output size must be smallest multiple of frame size, "
+            << "equal to or larger than input size. output size = " << outputIndex
+            << " input size = " << inputIndex << " frame size = " << encoderFrameSizeInBytes;
+    }
+
+private:
+    status_t mInitStatus;
+    std::shared_ptr<C2BlockPool> mPool;
+};
+
+const C2MemoryUsage FrameReassemblerTest::kUsage{C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+
+// Push frames with exactly the same size as the encoder requested.
+TEST_F(FrameReassemblerTest, PushExactFrameSize) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            1024 /* input frame size in bytes = 1024 samples * 1 channel * 1 bytes/sample */,
+            10 /* count */,
+            10240 /* expected output size = 10 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            2048 /* input frame size in bytes = 1024 samples * 1 channel * 2 bytes/sample */,
+            10 /* count */,
+            20480 /* expected output size = 10 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            4096 /* input frame size in bytes = 1024 samples * 1 channel * 4 bytes/sample */,
+            10 /* count */,
+            40960 /* expected output size = 10 * 4096 bytes/frame */);
+}
+
+// Push frames with half the size that the encoder requested.
+TEST_F(FrameReassemblerTest, PushHalfFrameSize) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            512 /* input frame size in bytes = 512 samples * 1 channel * 1 bytes per sample */,
+            10 /* count */,
+            5120 /* expected output size = 5 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            1024 /* input frame size in bytes = 512 samples * 1 channel * 2 bytes per sample */,
+            10 /* count */,
+            10240 /* expected output size = 5 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            2048 /* input frame size in bytes = 512 samples * 1 channel * 4 bytes per sample */,
+            10 /* count */,
+            20480 /* expected output size = 5 * 4096 bytes/frame */);
+}
+
+// Push frames with twice the size that the encoder requested.
+TEST_F(FrameReassemblerTest, PushDoubleFrameSize) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            2048 /* input frame size in bytes = 2048 samples * 1 channel * 1 bytes per sample */,
+            10 /* count */,
+            20480 /* expected output size = 20 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            4096 /* input frame size in bytes = 2048 samples * 1 channel * 2 bytes per sample */,
+            10 /* count */,
+            40960 /* expected output size = 20 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            8192 /* input frame size in bytes = 2048 samples * 1 channel * 4 bytes per sample */,
+            10 /* count */,
+            81920 /* expected output size = 20 * 4096 bytes/frame */);
+}
+
+// Push frames with a little bit larger (+5 samples) than the requested size.
+TEST_F(FrameReassemblerTest, PushLittleLargerFrameSize) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            1029 /* input frame size in bytes = 1029 samples * 1 channel * 1 bytes per sample */,
+            10 /* count */,
+            11264 /* expected output size = 11 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            2058 /* input frame size in bytes = 1029 samples * 1 channel * 2 bytes per sample */,
+            10 /* count */,
+            22528 /* expected output size = 11 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            4116 /* input frame size in bytes = 1029 samples * 1 channel * 4 bytes per sample */,
+            10 /* count */,
+            45056 /* expected output size = 11 * 4096 bytes/frame */);
+}
+
+// Push frames with a little bit smaller (-5 samples) than the requested size.
+TEST_F(FrameReassemblerTest, PushLittleSmallerFrameSize) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            1019 /* input frame size in bytes = 1019 samples * 1 channel * 1 bytes per sample */,
+            10 /* count */,
+            10240 /* expected output size = 10 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            2038 /* input frame size in bytes = 1019 samples * 1 channel * 2 bytes per sample */,
+            10 /* count */,
+            20480 /* expected output size = 10 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            4076 /* input frame size in bytes = 1019 samples * 1 channel * 4 bytes per sample */,
+            10 /* count */,
+            40960 /* expected output size = 10 * 4096 bytes/frame */);
+}
+
+// Push single-byte frames
+TEST_F(FrameReassemblerTest, PushSingleByte) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            1 /* input frame size in bytes */,
+            100000 /* count */,
+            100352 /* expected output size = 98 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            1 /* input frame size in bytes */,
+            100000 /* count */,
+            100352 /* expected output size = 49 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            1 /* input frame size in bytes */,
+            100000 /* count */,
+            102400 /* expected output size = 25 * 4096 bytes/frame */);
+}
+
+// Push one big chunk.
+TEST_F(FrameReassemblerTest, PushBigChunk) {
+    ASSERT_EQ(OK, initStatus());
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_8,
+            100000 /* input frame size in bytes */,
+            1 /* count */,
+            100352 /* expected output size = 98 * 1024 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_16,
+            100000 /* input frame size in bytes */,
+            1 /* count */,
+            100352 /* expected output size = 49 * 2048 bytes/frame */);
+    testPushSameSize(
+            1024 /* frame size in samples */,
+            48000 /* sample rate */,
+            1 /* channel count */,
+            PCM_FLOAT,
+            100000 /* input frame size in bytes */,
+            1 /* count */,
+            102400 /* expected output size = 25 * 4096 bytes/frame */);
+}
+
+} // namespace android
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index dee3bf6..74ef9ea 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -102,16 +102,30 @@
 }
 
 static bool using_ion(void) {
-    static int cached_result = -1;
-
-    if (cached_result == -1) {
+    static int cached_result = []()->int {
         struct stat buffer;
-        cached_result = (stat("/dev/ion", &buffer) == 0);
-        if (cached_result)
+        int ret = (stat("/dev/ion", &buffer) == 0);
+
+        if (property_get_int32("debug.c2.use_dmabufheaps", 0)) {
+            /*
+             * Double check that the system heap is present so we
+             * can gracefully fail back to ION if we cannot satisfy
+             * the override
+             */
+            ret = (stat("/dev/dma_heap/system", &buffer) != 0);
+            if (ret)
+                ALOGE("debug.c2.use_dmabufheaps set, but no system heap. Ignoring override!");
+            else
+                ALOGD("debug.c2.use_dmabufheaps set, forcing DMABUF Heaps");
+        }
+
+        if (ret)
             ALOGD("Using ION\n");
         else
             ALOGD("Using DMABUF Heaps\n");
-    }
+        return ret;
+    }();
+
     return (cached_result == 1);
 }
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 2c40fbb..19d1d1a 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1075,6 +1075,40 @@
     return mOriginalSampleRate;
 }
 
+status_t AudioTrack::setDualMonoMode(audio_dual_mono_mode_t mode)
+{
+    AutoMutex lock(mLock);
+    return setDualMonoMode_l(mode);
+}
+
+status_t AudioTrack::setDualMonoMode_l(audio_dual_mono_mode_t mode)
+{
+    return mAudioTrack->setDualMonoMode(mode);
+}
+
+status_t AudioTrack::getDualMonoMode(audio_dual_mono_mode_t* mode) const
+{
+    AutoMutex lock(mLock);
+    return mAudioTrack->getDualMonoMode(mode);
+}
+
+status_t AudioTrack::setAudioDescriptionMixLevel(float leveldB)
+{
+    AutoMutex lock(mLock);
+    return setAudioDescriptionMixLevel_l(leveldB);
+}
+
+status_t AudioTrack::setAudioDescriptionMixLevel_l(float leveldB)
+{
+    return mAudioTrack->setAudioDescriptionMixLevel(leveldB);
+}
+
+status_t AudioTrack::getAudioDescriptionMixLevel(float* leveldB) const
+{
+    AutoMutex lock(mLock);
+    return mAudioTrack->getAudioDescriptionMixLevel(leveldB);
+}
+
 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
 {
     AutoMutex lock(mLock);
@@ -1082,7 +1116,11 @@
         return NO_ERROR;
     }
     if (isOffloadedOrDirect_l()) {
-        return INVALID_OPERATION;
+        status_t status = mAudioTrack->setPlaybackRateParameters(playbackRate);
+        if (status == NO_ERROR) {
+            mPlaybackRate = playbackRate;
+        }
+        return status;
     }
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         return INVALID_OPERATION;
@@ -1147,9 +1185,16 @@
     return NO_ERROR;
 }
 
-const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
+const AudioPlaybackRate& AudioTrack::getPlaybackRate()
 {
     AutoMutex lock(mLock);
+    if (isOffloadedOrDirect_l()) {
+        audio_playback_rate_t playbackRateTemp;
+        const status_t status = mAudioTrack->getPlaybackRateParameters(&playbackRateTemp);
+        if (status == NO_ERROR) { // update local version if changed.
+            mPlaybackRate = playbackRateTemp;
+        }
+    }
     return mPlaybackRate;
 }
 
@@ -1745,6 +1790,13 @@
     mProxy->setPlaybackRate(playbackRateTemp);
     mProxy->setMinimum(mNotificationFramesAct);
 
+    if (mDualMonoMode != AUDIO_DUAL_MONO_MODE_OFF) {
+        setDualMonoMode_l(mDualMonoMode);
+    }
+    if (mAudioDescriptionMixLeveldB != -std::numeric_limits<float>::infinity()) {
+        setAudioDescriptionMixLevel_l(mAudioDescriptionMixLeveldB);
+    }
+
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
 
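Taken together, the AudioTrack additions let a client of an offloaded or direct track select a dual-mono presentation and an audio-description mix level; both values are cached in mDualMonoMode and mAudioDescriptionMixLeveldB and re-applied by the restore block above when the IAudioTrack is re-created. A hypothetical caller-side fragment, not a complete program (the track object and its setup are assumed):

    // sp<AudioTrack> track = ...;  // an offloaded or direct output track
    status_t status = track->setDualMonoMode(AUDIO_DUAL_MONO_MODE_LL); // left channel on both outputs
    if (status == NO_ERROR) {
        status = track->setAudioDescriptionMixLevel(-12.0f);           // AD mixed 12 dB below main
    }
    // Tracks that are neither offloaded nor direct report INVALID_OPERATION for these calls.
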
diff --git a/media/libaudioclient/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
index 6219e7a..6fcf300 100644
--- a/media/libaudioclient/IAudioTrack.cpp
+++ b/media/libaudioclient/IAudioTrack.cpp
@@ -44,6 +44,12 @@
     SIGNAL,
     APPLY_VOLUME_SHAPER,
     GET_VOLUME_SHAPER_STATE,
+    SET_DUAL_MONO_MODE,
+    GET_DUAL_MONO_MODE,
+    SET_AUDIO_DESCRIPTION_MIX_LEVEL,
+    GET_AUDIO_DESCRIPTION_MIX_LEVEL,
+    SET_PLAYBACK_RATE_PARAMETERS,
+    GET_PLAYBACK_RATE_PARAMETERS,
 };
 
 class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -207,6 +213,92 @@
         }
         return state;
     }
+
+    status_t getDualMonoMode(audio_dual_mono_mode_t* mode) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_DUAL_MONO_MODE, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        if (status != NO_ERROR) {
+            return status;
+        }
+        *mode = (audio_dual_mono_mode_t)reply.readInt32();
+        return NO_ERROR;
+    }
+
+    status_t setDualMonoMode(audio_dual_mono_mode_t mode) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32((int32_t)mode);
+        status_t status = remote()->transact(SET_DUAL_MONO_MODE, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return reply.readInt32();
+    }
+
+    status_t getAudioDescriptionMixLevel(float* leveldB) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_AUDIO_DESCRIPTION_MIX_LEVEL, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        if (status != NO_ERROR) {
+            return status;
+        }
+        *leveldB = reply.readFloat();
+        return NO_ERROR;
+    }
+
+    status_t setAudioDescriptionMixLevel(float leveldB) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeFloat(leveldB);
+        status_t status = remote()->transact(SET_AUDIO_DESCRIPTION_MIX_LEVEL, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return reply.readInt32();
+    }
+
+    status_t getPlaybackRateParameters(audio_playback_rate_t* playbackRate) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_PLAYBACK_RATE_PARAMETERS, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        if (status != NO_ERROR) {
+            return status;
+        }
+        playbackRate->mSpeed = reply.readFloat();
+        playbackRate->mPitch = reply.readFloat();
+        playbackRate->mStretchMode =
+            static_cast<audio_timestretch_stretch_mode_t>(reply.readInt32());
+        playbackRate->mFallbackMode =
+            static_cast<audio_timestretch_fallback_mode_t>(reply.readInt32());
+        return NO_ERROR;
+    }
+
+    status_t setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeFloat(playbackRate.mSpeed);
+        data.writeFloat(playbackRate.mPitch);
+        data.writeInt32(playbackRate.mStretchMode);
+        data.writeInt32(playbackRate.mFallbackMode);
+        status_t status = remote()->transact(SET_PLAYBACK_RATE_PARAMETERS, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
@@ -309,6 +401,59 @@
             }
             return NO_ERROR;
         } break;
+        case SET_DUAL_MONO_MODE: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            reply->writeInt32( setDualMonoMode((audio_dual_mono_mode_t)data.readInt32()) );
+            return NO_ERROR;
+        } break;
+        case GET_DUAL_MONO_MODE: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            audio_dual_mono_mode_t mode;
+            const status_t status = getDualMonoMode(&mode);
+            reply->writeInt32((int32_t)status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(mode);
+            }
+            return NO_ERROR;
+        } break;
+        case SET_AUDIO_DESCRIPTION_MIX_LEVEL: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            reply->writeInt32( setAudioDescriptionMixLevel(data.readFloat()) );
+            return NO_ERROR;
+        } break;
+        case GET_AUDIO_DESCRIPTION_MIX_LEVEL: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            float f;
+            const status_t status = getAudioDescriptionMixLevel(&f);
+            reply->writeInt32((int32_t)status);
+            if (status == NO_ERROR) {
+                reply->writeFloat(f);
+            }
+            return NO_ERROR;
+        } break;
+        case SET_PLAYBACK_RATE_PARAMETERS: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            audio_playback_rate_t playbackRate = {
+                data.readFloat(),
+                data.readFloat(),
+                static_cast<audio_timestretch_stretch_mode_t>(data.readInt32()),
+                static_cast<audio_timestretch_fallback_mode_t>(data.readInt32())};
+            reply->writeInt32( setPlaybackRateParameters(playbackRate) );
+            return NO_ERROR;
+        } break;
+        case GET_PLAYBACK_RATE_PARAMETERS: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            audio_playback_rate_t playbackRate;
+            const status_t status = getPlaybackRateParameters(&playbackRate);
+            reply->writeInt32((int32_t)status);
+            if (status == NO_ERROR) {
+                reply->writeFloat(playbackRate.mSpeed);
+                reply->writeFloat(playbackRate.mPitch);
+                reply->writeInt32(playbackRate.mStretchMode);
+                reply->writeInt32(playbackRate.mFallbackMode);
+            }
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index de183d8..fac4c83 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -513,6 +513,18 @@
      */
             uint32_t    getOriginalSampleRate() const;
 
+    /* Sets the Dual Mono mode presentation on the output device. */
+            status_t    setDualMonoMode(audio_dual_mono_mode_t mode);
+
+    /* Returns the Dual Mono mode presentation setting. */
+            status_t    getDualMonoMode(audio_dual_mono_mode_t* mode) const;
+
+    /* Sets the Audio Description Mix level in dB. */
+            status_t    setAudioDescriptionMixLevel(float leveldB);
+
+    /* Returns the Audio Description Mix level in dB. */
+            status_t    getAudioDescriptionMixLevel(float* leveldB) const;
+
     /* Set source playback rate for timestretch
      * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
      * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
@@ -526,7 +538,7 @@
             status_t    setPlaybackRate(const AudioPlaybackRate &playbackRate);
 
     /* Return current playback rate */
-            const AudioPlaybackRate& getPlaybackRate() const;
+            const AudioPlaybackRate& getPlaybackRate();
 
     /* Enables looping and sets the start and end points of looping.
      * Only supported for static buffer mode.
@@ -1070,6 +1082,12 @@
 
             void     updateRoutedDeviceId_l();
 
+            /* Sets the Dual Mono mode presentation on the output device. */
+            status_t setDualMonoMode_l(audio_dual_mono_mode_t mode);
+
+            /* Sets the Audio Description Mix level in dB. */
+            status_t setAudioDescriptionMixLevel_l(float leveldB);
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -1282,6 +1300,10 @@
 
     wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 
+    // Cached values to restore along with the AudioTrack.
+    audio_dual_mono_mode_t mDualMonoMode = AUDIO_DUAL_MONO_MODE_OFF;
+    float mAudioDescriptionMixLeveldB = -std::numeric_limits<float>::infinity();
+
 private:
     class MediaMetrics {
       public:
diff --git a/media/libaudioclient/include/media/IAudioTrack.h b/media/libaudioclient/include/media/IAudioTrack.h
index 06e786d..dbbbf35 100644
--- a/media/libaudioclient/include/media/IAudioTrack.h
+++ b/media/libaudioclient/include/media/IAudioTrack.h
@@ -27,6 +27,7 @@
 #include <utils/String8.h>
 #include <media/AudioTimestamp.h>
 #include <media/VolumeShaper.h>
+#include <system/audio.h>
 
 namespace android {
 
@@ -86,6 +87,24 @@
 
     /* gets the volume shaper state */
     virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) = 0;
+
+    /* Returns the Dual Mono mode presentation setting. */
+    virtual status_t    getDualMonoMode(audio_dual_mono_mode_t* mode) = 0;
+
+    /* Sets the Dual Mono mode presentation on the output device. */
+    virtual status_t    setDualMonoMode(audio_dual_mono_mode_t mode) = 0;
+
+    /* Returns the Audio Description Mix level in dB. */
+    virtual status_t    getAudioDescriptionMixLevel(float* leveldB) = 0;
+
+    /* Sets the Audio Description Mix level in dB. */
+    virtual status_t    setAudioDescriptionMixLevel(float leveldB) = 0;
+
+    /* Retrieves current playback rate parameters. */
+    virtual status_t    getPlaybackRateParameters(audio_playback_rate_t* playbackRate) = 0;
+
+    /* Sets the playback rate parameters that control playback behavior. */
+    virtual status_t    setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index fab0fea..9a6272b 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -14,7 +14,6 @@
     ],
 
     required: [
-        "libaudiohal@2.0",
         "libaudiohal@4.0",
         "libaudiohal@5.0",
         "libaudiohal@6.0",
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index 7228b22..e420d07 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -35,7 +35,6 @@
     "6.0",
     "5.0",
     "4.0",
-    "2.0",
     nullptr
 };
 
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index df006b5..7b116bd 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -53,22 +53,6 @@
 }
 
 cc_library_shared {
-    name: "libaudiohal@2.0",
-    defaults: ["libaudiohal_default"],
-    shared_libs: [
-        "android.hardware.audio.common@2.0",
-        "android.hardware.audio.common@2.0-util",
-        "android.hardware.audio.effect@2.0",
-        "android.hardware.audio@2.0",
-    ],
-    cflags: [
-        "-DMAJOR_VERSION=2",
-        "-DMINOR_VERSION=0",
-        "-include common/all-versions/VersionMacro.h",
-    ]
-}
-
-cc_library_shared {
     name: "libaudiohal@4.0",
     defaults: ["libaudiohal_default"],
     shared_libs: [
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index ebed5fd..cf07a47 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -50,12 +50,20 @@
             halKeys.get(String8(AUDIO_PARAMETER_DEVICE_SUP_ENCAPSULATION_METADATA_TYPES),
                         value) == NO_ERROR;
 
+    const bool keepDelayValue =
+            halKeys.get(String8(AUDIO_PARAMETER_DEVICE_ADDITIONAL_OUTPUT_DELAY),
+                        value) == NO_ERROR ||
+            halKeys.get(String8(AUDIO_PARAMETER_DEVICE_MAX_ADDITIONAL_OUTPUT_DELAY),
+                        value) == NO_ERROR;
+
     for (size_t i = 0; i < halKeys.size(); ++i) {
         String8 key;
         status_t status = halKeys.getAt(i, key);
         if (status != OK) return status;
         if ((keepFormatValue && key == AudioParameter::keyFormat) ||
-            (keepRoutingValue && key == AudioParameter::keyRouting)) {
+            (keepRoutingValue && key == AudioParameter::keyRouting) ||
+            (keepDelayValue && key == AUDIO_PARAMETER_DEVICE_ADDITIONAL_OUTPUT_DELAY) ||
+            (keepDelayValue && key == AUDIO_PARAMETER_DEVICE_MAX_ADDITIONAL_OUTPUT_DELAY)) {
             AudioParameter keepValueParam;
             halKeys.getAt(i, key, value);
             keepValueParam.add(key, value);
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 09a7c1c..8a9eec3 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -636,6 +636,32 @@
 #endif
 
 #if MAJOR_VERSION < 6
+status_t StreamOutHalHidl::getDualMonoMode(audio_dual_mono_mode_t* mode __unused) {
+    return INVALID_OPERATION;
+}
+
+status_t StreamOutHalHidl::setDualMonoMode(audio_dual_mono_mode_t mode __unused) {
+    return INVALID_OPERATION;
+}
+
+status_t StreamOutHalHidl::getAudioDescriptionMixLevel(float* leveldB __unused) {
+    return INVALID_OPERATION;
+}
+
+status_t StreamOutHalHidl::setAudioDescriptionMixLevel(float leveldB __unused) {
+    return INVALID_OPERATION;
+}
+
+status_t StreamOutHalHidl::getPlaybackRateParameters(
+        audio_playback_rate_t* playbackRate __unused) {
+    return INVALID_OPERATION;
+}
+
+status_t StreamOutHalHidl::setPlaybackRateParameters(
+        const audio_playback_rate_t& playbackRate __unused) {
+    return INVALID_OPERATION;
+}
+
 status_t StreamOutHalHidl::setEventCallback(
         const sp<StreamOutHalInterfaceEventCallback>& callback __unused) {
     // Codec format callback is supported starting from audio HAL V6.0
@@ -643,6 +669,73 @@
 }
 #else
 
+status_t StreamOutHalHidl::getDualMonoMode(audio_dual_mono_mode_t* mode) {
+    if (mStream == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getDualMonoMode(
+            [&](Result r, DualMonoMode hidlMode) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *mode = static_cast<audio_dual_mono_mode_t>(hidlMode);
+                }
+            });
+    return processReturn("getDualMonoMode", ret, retval);
+}
+
+status_t StreamOutHalHidl::setDualMonoMode(audio_dual_mono_mode_t mode) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn(
+            "setDualMonoMode", mStream->setDualMonoMode(static_cast<DualMonoMode>(mode)));
+}
+
+status_t StreamOutHalHidl::getAudioDescriptionMixLevel(float* leveldB) {
+    if (mStream == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getAudioDescriptionMixLevel(
+            [&](Result r, float hidlLeveldB) {
+                retval = r;
+                if (retval == Result::OK) {
+                    *leveldB = hidlLeveldB;
+                }
+            });
+    return processReturn("getAudioDescriptionMixLevel", ret, retval);
+}
+
+status_t StreamOutHalHidl::setAudioDescriptionMixLevel(float leveldB) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn(
+            "setAudioDescriptionMixLevel", mStream->setAudioDescriptionMixLevel(leveldB));
+}
+
+status_t StreamOutHalHidl::getPlaybackRateParameters(audio_playback_rate_t* playbackRate) {
+    if (mStream == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getPlaybackRateParameters(
+            [&](Result r, PlaybackRate hidlPlaybackRate) {
+                retval = r;
+                if (retval == Result::OK) {
+                    playbackRate->mSpeed = hidlPlaybackRate.speed;
+                    playbackRate->mPitch = hidlPlaybackRate.pitch;
+                    playbackRate->mStretchMode =
+                        static_cast<audio_timestretch_stretch_mode_t>(
+                            hidlPlaybackRate.timestretchMode);
+                    playbackRate->mFallbackMode =
+                        static_cast<audio_timestretch_fallback_mode_t>(
+                            hidlPlaybackRate.fallbackMode);
+                }
+            });
+    return processReturn("getPlaybackRateParameters", ret, retval);
+}
+
+status_t StreamOutHalHidl::setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) {
+    if (mStream == 0) return NO_INIT;
+    return processReturn(
+            "setPlaybackRateParameters", mStream->setPlaybackRateParameters(
+                PlaybackRate{playbackRate.mSpeed, playbackRate.mPitch,
+                    static_cast<TimestretchMode>(playbackRate.mStretchMode),
+                    static_cast<TimestretchFallbackMode>(playbackRate.mFallbackMode)}));
+}
+
 #include PATH(android/hardware/audio/FILE_VERSION/IStreamOutEventCallback.h)
 
 namespace {
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 88f8587..2db4973 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -173,6 +173,24 @@
     void onDrainReady();
     void onError();
 
+    // Returns the Dual Mono mode presentation setting.
+    status_t getDualMonoMode(audio_dual_mono_mode_t* mode) override;
+
+    // Sets the Dual Mono mode presentation on the output device.
+    status_t setDualMonoMode(audio_dual_mono_mode_t mode) override;
+
+    // Returns the Audio Description Mix level in dB.
+    status_t getAudioDescriptionMixLevel(float* leveldB) override;
+
+    // Sets the Audio Description Mix level in dB.
+    status_t setAudioDescriptionMixLevel(float leveldB) override;
+
+    // Retrieves current playback rate parameters.
+    status_t getPlaybackRateParameters(audio_playback_rate_t* playbackRate) override;
+
+    // Sets the playback rate parameters that control playback behavior.
+    status_t setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) override;
+
     status_t setEventCallback(const sp<StreamOutHalInterfaceEventCallback>& callback) override;
 
     // Methods used by StreamCodecFormatCallback (HIDL).
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index f544e06..a3f2fb4 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -311,6 +311,36 @@
     return mStream->get_mmap_position(mStream, position);
 }
 
+status_t StreamOutHalLocal::getDualMonoMode(audio_dual_mono_mode_t* mode) {
+    if (mStream->get_dual_mono_mode == nullptr) return INVALID_OPERATION;
+    return mStream->get_dual_mono_mode(mStream, mode);
+}
+
+status_t StreamOutHalLocal::setDualMonoMode(audio_dual_mono_mode_t mode) {
+    if (mStream->set_dual_mono_mode == nullptr) return INVALID_OPERATION;
+    return mStream->set_dual_mono_mode(mStream, mode);
+}
+
+status_t StreamOutHalLocal::getAudioDescriptionMixLevel(float* leveldB) {
+    if (mStream->get_audio_description_mix_level == nullptr) return INVALID_OPERATION;
+    return mStream->get_audio_description_mix_level(mStream, leveldB);
+}
+
+status_t StreamOutHalLocal::setAudioDescriptionMixLevel(float leveldB) {
+    if (mStream->set_audio_description_mix_level == nullptr) return INVALID_OPERATION;
+    return mStream->set_audio_description_mix_level(mStream, leveldB);
+}
+
+status_t StreamOutHalLocal::getPlaybackRateParameters(audio_playback_rate_t* playbackRate) {
+    if (mStream->get_playback_rate_parameters == nullptr) return INVALID_OPERATION;
+    return mStream->get_playback_rate_parameters(mStream, playbackRate);
+}
+
+status_t StreamOutHalLocal::setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) {
+    if (mStream->set_playback_rate_parameters == nullptr) return INVALID_OPERATION;
+    return mStream->set_playback_rate_parameters(mStream, &playbackRate);
+}
+
 status_t StreamOutHalLocal::setEventCallback(
         const sp<StreamOutHalInterfaceEventCallback>& callback) {
     if (mStream->set_event_callback == nullptr) {
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index 8e5180f..e228104 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -156,6 +156,24 @@
     // Called when the metadata of the stream's source has been changed.
     status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
 
+    // Returns the Dual Mono mode presentation setting.
+    status_t getDualMonoMode(audio_dual_mono_mode_t* mode) override;
+
+    // Sets the Dual Mono mode presentation on the output device.
+    status_t setDualMonoMode(audio_dual_mono_mode_t mode) override;
+
+    // Returns the Audio Description Mix level in dB.
+    status_t getAudioDescriptionMixLevel(float* leveldB) override;
+
+    // Sets the Audio Description Mix level in dB.
+    status_t setAudioDescriptionMixLevel(float leveldB) override;
+
+    // Retrieves current playback rate parameters.
+    status_t getPlaybackRateParameters(audio_playback_rate_t* playbackRate) override;
+
+    // Sets the playback rate parameters that control playback behavior.
+    status_t setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) override;
+
     status_t setEventCallback(const sp<StreamOutHalInterfaceEventCallback>& callback) override;
 
   private:
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 523705e..097e9a2 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -160,12 +160,31 @@
     struct SourceMetadata {
         std::vector<playback_track_metadata_v7_t> tracks;
     };
+
     /**
      * Called when the metadata of the stream's source has been changed.
      * @param sourceMetadata Description of the audio that is played by the clients.
      */
     virtual status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) = 0;
 
+    // Returns the Dual Mono mode presentation setting.
+    virtual status_t getDualMonoMode(audio_dual_mono_mode_t* mode) = 0;
+
+    // Sets the Dual Mono mode presentation on the output device.
+    virtual status_t setDualMonoMode(audio_dual_mono_mode_t mode) = 0;
+
+    // Returns the Audio Description Mix level in dB.
+    virtual status_t getAudioDescriptionMixLevel(float* leveldB) = 0;
+
+    // Sets the Audio Description Mix level in dB.
+    virtual status_t setAudioDescriptionMixLevel(float leveldB) = 0;
+
+    // Retrieves current playback rate parameters.
+    virtual status_t getPlaybackRateParameters(audio_playback_rate_t* playbackRate) = 0;
+
+    // Sets the playback rate parameters that control playback behavior.
+    virtual status_t setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) = 0;
+
     virtual status_t setEventCallback(const sp<StreamOutHalInterfaceEventCallback>& callback) = 0;
 
   protected:
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index b439880..5e8255a 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -19,3 +19,14 @@
         "libhardware_headers",
     ],
 }
+
+cc_test {
+    name: "correlation",
+    host_supported: true,
+    srcs: ["correlation.cpp"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
diff --git a/media/libeffects/preprocessing/tests/correlation.cpp b/media/libeffects/preprocessing/tests/correlation.cpp
new file mode 100644
index 0000000..b13dcc7
--- /dev/null
+++ b/media/libeffects/preprocessing/tests/correlation.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <vector>
+
+constexpr int kMinLoopLimitValue = 1;
+constexpr int kNumPeaks = 3;
+
+/*!
+  \brief                  Compute the length-normalized correlation of two signals
+
+  \param sigX             Pointer to signal 1
+  \param sigY             Pointer to signal 2
+  \param len              Length of the signals
+  \param enableCrossCorr  Set to 1 if cross-correlation is needed
+
+  \return                 A pair: a vector of correlation peak indices and a vector of the
+                          corresponding correlation peak values
+*/
+
+static std::pair<std::vector<int>, std::vector<float>> correlation(const int16_t* sigX,
+                                                                   const int16_t* sigY, int len,
+                                                                   int16_t enableCrossCorr) {
+    float maxCorrVal = 0.f, prevCorrVal = 0.f;
+    int delay = 0, peakIndex = 0, flag = 0;
+    int loopLim = (1 == enableCrossCorr) ? len : kMinLoopLimitValue;
+    std::vector<int> peakIndexVect(kNumPeaks, 0);
+    std::vector<float> peakValueVect(kNumPeaks, 0.f);
+    for (int i = 0; i < loopLim; i++) {
+        float corrVal = 0.f;
+        for (int j = i; j < len; j++) {
+            corrVal += (float)(sigX[j] * sigY[j - i]);
+        }
+        corrVal /= len - i;
+        if (corrVal > maxCorrVal) {
+            delay = i;
+            maxCorrVal = corrVal;
+        }
+        // Correlation peaks are expected at equal intervals, with the interval length matching
+        // the wave period.
+        // The following block saves the first kNumPeaks peaks and the indices at which they occur.
+        if (peakIndex < kNumPeaks) {
+            if (corrVal > prevCorrVal) {
+                peakIndexVect[peakIndex] = i;
+                peakValueVect[peakIndex] = corrVal;
+                flag = 0;
+            } else if (0 == flag) {
+                peakIndex++;
+                flag = 1;
+            }
+        }
+        if (peakIndex == kNumPeaks) break;
+        prevCorrVal = corrVal;
+    }
+    return {peakIndexVect, peakValueVect};
+}
+
+void printUsage() {
+    printf("\nUsage: ");
+    printf("\n     correlation <firstFile> <secondFile> [enableCrossCorr]\n");
+    printf("\nwhere, \n     <firstFile>       is the first file name");
+    printf("\n     <secondFile>      is the second file name");
+    printf("\n     [enableCrossCorr] is flag to set for cross-correlation (Default 1)\n\n");
+}
+
+int main(int argc, const char* argv[]) {
+    if (argc < 3) {
+        printUsage();
+        return EXIT_FAILURE;
+    }
+
+    std::unique_ptr<FILE, decltype(&fclose)> fInput1(fopen(argv[1], "rb"), &fclose);
+    if (fInput1.get() == NULL) {
+        printf("\nError: missing file %s\n", argv[1]);
+        return EXIT_FAILURE;
+    }
+    std::unique_ptr<FILE, decltype(&fclose)> fInput2(fopen(argv[2], "rb"), &fclose);
+    if (fInput2.get() == NULL) {
+        printf("\nError: missing file %s\n", argv[2]);
+        return EXIT_FAILURE;
+    }
+    int16_t enableCrossCorr = (4 == argc) ? atoi(argv[3]) : 1;
+
+    fseek(fInput1.get(), 0L, SEEK_END);
+    unsigned int fileSize1 = ftell(fInput1.get());
+    rewind(fInput1.get());
+    fseek(fInput2.get(), 0L, SEEK_END);
+    unsigned int fileSize2 = ftell(fInput2.get());
+    rewind(fInput2.get());
+    if (fileSize1 != fileSize2) {
+        printf("\nError: File sizes different\n");
+        return EXIT_FAILURE;
+    }
+
+    int numFrames = fileSize1 / sizeof(int16_t);
+    std::unique_ptr<int16_t[]> inBuffer1(new int16_t[numFrames]());
+    std::unique_ptr<int16_t[]> inBuffer2(new int16_t[numFrames]());
+
+    fread(inBuffer1.get(), sizeof(int16_t), numFrames, fInput1.get());
+    fread(inBuffer2.get(), sizeof(int16_t), numFrames, fInput2.get());
+
+    auto pairAutoCorr1 = correlation(inBuffer1.get(), inBuffer1.get(), numFrames, enableCrossCorr);
+    auto pairAutoCorr2 = correlation(inBuffer2.get(), inBuffer2.get(), numFrames, enableCrossCorr);
+
+    // The following block compares the pitch periods of the two input signals. They must match,
+    // since AGC applies only gain; no frequency-related computation is done.
+    bool pitchMatch = false;
+    for (unsigned i = 0; i < pairAutoCorr1.first.size() - 1; i++) {
+        if (pairAutoCorr1.first[i + 1] - pairAutoCorr1.first[i] !=
+            pairAutoCorr2.first[i + 1] - pairAutoCorr2.first[i]) {
+            pitchMatch = false;
+            break;
+        }
+        pitchMatch = true;
+    }
+    if (pitchMatch) {
+        printf("Auto-correlation  : Pitch matched\n");
+    } else {
+        printf("Auto-correlation  : Pitch mismatch\n");
+        return EXIT_FAILURE;
+    }
+
+    if (enableCrossCorr) {
+        auto pairCrossCorr =
+                correlation(inBuffer1.get(), inBuffer2.get(), numFrames, enableCrossCorr);
+
+        // Since AGC applies only gain, the pitch information obtained from the cross-correlation
+        // of input and output is expected to be the same as the input signal's pitch information.
+        pitchMatch = false;
+        for (unsigned i = 0; i < pairCrossCorr.first.size() - 1; i++) {
+            if (pairAutoCorr1.first[i + 1] - pairAutoCorr1.first[i] !=
+                pairCrossCorr.first[i + 1] - pairCrossCorr.first[i]) {
+                pitchMatch = false;
+                break;
+            }
+            pitchMatch = true;
+        }
+        if (pitchMatch) {
+            printf("Cross-correlation : Pitch matched for AGC\n");
+            if (pairAutoCorr1.second[0]) {
+                printf("Expected gain     : (maxCrossCorr / maxAutoCorr1) = %f\n",
+                       pairCrossCorr.second[0] / pairAutoCorr1.second[0]);
+            }
+        } else {
+            printf("Cross-correlation : Pitch mismatch\n");
+            return EXIT_FAILURE;
+        }
+    }
+
+    return EXIT_SUCCESS;
+}
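
Per its printUsage(), the tool is run as correlation <firstFile> <secondFile> [enableCrossCorr] on two equally sized raw 16-bit PCM files. As a sanity check on the "Expected gain" print above: for an input x and an AGC output y = g*x, the zero-lag cross-correlation is g times the auto-correlation of x, so their ratio recovers g. A tiny illustration with synthetic data (assumed values, not part of the test):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        const float gain = 0.5f;                              // pretend AGC applied a fixed gain
        std::vector<int16_t> x = {1000, -2000, 3000, -4000};  // input signal
        std::vector<int16_t> y;                               // "AGC" output, y = gain * x
        for (int16_t v : x) y.push_back(static_cast<int16_t>(v * gain));
        float autoCorr = 0.f, crossCorr = 0.f;
        for (size_t i = 0; i < x.size(); ++i) {
            autoCorr += static_cast<float>(x[i]) * x[i];
            crossCorr += static_cast<float>(x[i]) * y[i];
        }
        std::printf("estimated gain = %f\n", crossCorr / autoCorr);  // prints roughly 0.5
        return 0;
    }
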
diff --git a/media/libmediahelper/AudioParameter.cpp b/media/libmediahelper/AudioParameter.cpp
index fc8306c..73c1e41 100644
--- a/media/libmediahelper/AudioParameter.cpp
+++ b/media/libmediahelper/AudioParameter.cpp
@@ -57,6 +57,10 @@
 //        AUDIO_PARAMETER_DEVICE_SUP_ENCAPSULATION_MODES;
 // const char * const AudioParameter::keyDeviceSupportedEncapsulationMetadataTypes =
 //        AUDIO_PARAMETER_DEVICE_SUP_ENCAPSULATION_METADATA_TYPES;
+// const char * const AudioParameter::keyAdditionalOutputDeviceDelay =
+//        AUDIO_PARAMETER_DEVICE_ADDITIONAL_OUTPUT_DELAY;
+// const char * const AudioParameter::keyMaxAdditionalOutputDeviceDelay =
+//        AUDIO_PARAMETER_DEVICE_MAX_ADDITIONAL_OUTPUT_DELAY;
 
 AudioParameter::AudioParameter(const String8& keyValuePairs)
 {
diff --git a/media/libmediahelper/include/media/AudioParameter.h b/media/libmediahelper/include/media/AudioParameter.h
index 66d8dfb..b72d0d5 100644
--- a/media/libmediahelper/include/media/AudioParameter.h
+++ b/media/libmediahelper/include/media/AudioParameter.h
@@ -104,6 +104,9 @@
     // static const char * const keyDeviceSupportedEncapsulationModes;
     // static const char * const keyDeviceSupportedEncapsulationMetadataTypes;
 
+    // static const char * const keyAdditionalOutputDeviceDelay;
+    // static const char * const keyMaxAdditionalOutputDeviceDelay;
+
     String8 toString() const { return toStringImpl(true); }
     String8 keysToString() const { return toStringImpl(false); }
 
diff --git a/media/libmediahelper/tests/typeconverter_tests.cpp b/media/libmediahelper/tests/typeconverter_tests.cpp
index d7bfb89..181d636 100644
--- a/media/libmediahelper/tests/typeconverter_tests.cpp
+++ b/media/libmediahelper/tests/typeconverter_tests.cpp
@@ -182,8 +182,9 @@
         audio_format_t format;
         EXPECT_TRUE(FormatConverter::fromString(stringVal, format))
                 << "Conversion of \"" << stringVal << "\" failed";
-        EXPECT_TRUE(audio_is_valid_format(format))
-                << "Converted format \"" << stringVal << "\" is invalid";
+        EXPECT_EQ(enumVal != xsd::AudioFormat::AUDIO_FORMAT_DEFAULT,
+                audio_is_valid_format(format))
+                << "Validity of \"" << stringVal << "\" is not as expected";
         EXPECT_EQ(stringVal, toString(format));
     }
 }
diff --git a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
index 5a52ea5..d08c66d 100644
--- a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
+++ b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
@@ -41,7 +41,7 @@
         "libmediandk",
     ],
 
-    compile_multilib: "32",
+    compile_multilib: "prefer32",
 
     cflags: [
         "-Werror",
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 4fe871f..0ed0de1 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -317,7 +317,7 @@
 
 class MediaCodec::ReleaseSurface {
 public:
-    ReleaseSurface() {
+    explicit ReleaseSurface(uint64_t usage) {
         BufferQueue::createBufferQueue(&mProducer, &mConsumer);
         mSurface = new Surface(mProducer, false /* controlledByApp */);
         struct ConsumerListener : public BnConsumerListener {
@@ -328,6 +328,7 @@
         sp<ConsumerListener> listener{new ConsumerListener};
         mConsumer->consumerConnect(listener, false);
         mConsumer->setConsumerName(String8{"MediaCodec.release"});
+        mConsumer->setConsumerUsageBits(usage);
     }
 
     const sp<Surface> &getSurface() {
@@ -3122,7 +3123,11 @@
             if (asyncNotify != nullptr) {
                 if (mSurface != NULL) {
                     if (!mReleaseSurface) {
-                        mReleaseSurface.reset(new ReleaseSurface);
+                        uint64_t usage = 0;
+                        if (mSurface->getConsumerUsage(&usage) != OK) {
+                            usage = 0;
+                        }
+                        mReleaseSurface.reset(new ReleaseSurface(usage));
                     }
                     if (mSurface != mReleaseSurface->getSurface()) {
                         status_t err = connectToSurface(mReleaseSurface->getSurface());
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index a7f94c1..4a505d4 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -9,7 +9,6 @@
         "libmedia",
         "libstagefright",
         "libstagefright_foundation",
-        "libstagefright_omx",
         "libutils",
         "liblog",
     ],
@@ -17,11 +16,8 @@
     include_dirs: [
         "frameworks/av/media/libstagefright",
         "frameworks/av/media/libstagefright/include",
-        "frameworks/native/include/media/openmax",
     ],
 
-    compile_multilib: "32",
-
     cflags: [
         "-Werror",
         "-Wall",
@@ -44,4 +40,4 @@
         "-Werror",
         "-Wall",
     ],
-}
\ No newline at end of file
+}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 8fe18de..4ac46b7 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1462,6 +1462,20 @@
     }
 }
 
+// Update downstream patches for all playback threads attached to an MSD module
+void AudioFlinger::updateDownStreamPatches_l(const struct audio_patch *patch,
+                                             const std::set<audio_io_handle_t> streams)
+{
+    for (const audio_io_handle_t stream : streams) {
+        PlaybackThread *playbackThread = checkPlaybackThread_l(stream);
+        if (playbackThread == nullptr || !playbackThread->isMsdDevice()) {
+            continue;
+        }
+        playbackThread->setDownStreamPatch(patch);
+        playbackThread->sendIoConfigEvent(AUDIO_OUTPUT_CONFIG_CHANGED);
+    }
+}
+
 // Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
 // Some keys are used for audio routing and audio path configuration and should be reserved for use
 // by audio policy and audio flinger for functional, privacy and security reasons.
@@ -2534,7 +2548,11 @@
                       *output, thread.get());
             }
             mPlaybackThreads.add(*output, thread);
-            mPatchPanel.notifyStreamOpened(outHwDev, *output);
+            struct audio_patch patch;
+            mPatchPanel.notifyStreamOpened(outHwDev, *output, &patch);
+            if (thread->isMsdDevice()) {
+                thread->setDownStreamPatch(&patch);
+            }
             return thread;
         }
     }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c47afd5..10d4029 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -320,6 +320,9 @@
     status_t removeEffectFromHal(audio_port_handle_t deviceId,
             audio_module_handle_t hwModuleId, sp<EffectHalInterface> effect);
 
+    void updateDownStreamPatches_l(const struct audio_patch *patch,
+                                   const std::set<audio_io_handle_t> streams);
+
 private:
     // FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
     static const size_t kLogMemorySize = 400 * 1024;
@@ -642,6 +645,13 @@
         virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) override;
         virtual status_t    getTimestamp(AudioTimestamp& timestamp);
         virtual void        signal(); // signal playback thread for a change in control block
+                status_t    getDualMonoMode(audio_dual_mono_mode_t* mode) override;
+                status_t    setDualMonoMode(audio_dual_mono_mode_t mode) override;
+                status_t    getAudioDescriptionMixLevel(float* leveldB) override;
+                status_t    setAudioDescriptionMixLevel(float leveldB) override;
+                status_t    getPlaybackRateParameters(audio_playback_rate_t* playbackRate) override;
+                status_t    setPlaybackRateParameters(
+                        const audio_playback_rate_t& playbackRate) override;
 
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index b58fd8b..37aa13e 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -413,10 +413,10 @@
         *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
         newPatch.mHalHandle = halHandle;
         mAudioFlinger.mDeviceEffectManager.createAudioPatch(*handle, newPatch);
-        mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
         if (insertedModule != AUDIO_MODULE_HANDLE_NONE) {
-            addSoftwarePatchToInsertedModules(insertedModule, *handle);
+            addSoftwarePatchToInsertedModules(insertedModule, *handle, &newPatch.mAudioPatch);
         }
+        mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
     } else {
         newPatch.clearConnections(this);
     }
@@ -781,10 +781,20 @@
 }
 
 void AudioFlinger::PatchPanel::notifyStreamOpened(
-        AudioHwDevice *audioHwDevice, audio_io_handle_t stream)
+        AudioHwDevice *audioHwDevice, audio_io_handle_t stream, struct audio_patch *patch)
 {
     if (audioHwDevice->isInsert()) {
         mInsertedModules[audioHwDevice->handle()].streams.insert(stream);
+        if (patch != nullptr) {
+            std::vector <SoftwarePatch> swPatches;
+            getDownstreamSoftwarePatches(stream, &swPatches);
+            if (swPatches.size() > 0) {
+                auto iter = mPatches.find(swPatches[0].getPatchHandle());
+                if (iter != mPatches.end()) {
+                    *patch = iter->second.mAudioPatch;
+                }
+            }
+        }
     }
 }
 
@@ -813,9 +823,13 @@
 }
 
 void AudioFlinger::PatchPanel::addSoftwarePatchToInsertedModules(
-        audio_module_handle_t module, audio_patch_handle_t handle)
+        audio_module_handle_t module, audio_patch_handle_t handle,
+        const struct audio_patch *patch)
 {
     mInsertedModules[module].sw_patches.insert(handle);
+    if (!mInsertedModules[module].streams.empty()) {
+        mAudioFlinger.updateDownStreamPatches_l(patch, mInsertedModules[module].streams);
+    }
 }
 
 void AudioFlinger::PatchPanel::removeSoftwarePatchFromInsertedModules(
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 89d4eb1..ea38559 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -71,7 +71,8 @@
             std::vector<SoftwarePatch> *patches) const;
 
     // Notifies patch panel about all opened and closed streams.
-    void notifyStreamOpened(AudioHwDevice *audioHwDevice, audio_io_handle_t stream);
+    void notifyStreamOpened(AudioHwDevice *audioHwDevice, audio_io_handle_t stream,
+                            struct audio_patch *patch);
     void notifyStreamClosed(audio_io_handle_t stream);
 
     void dump(int fd) const;
@@ -226,7 +227,8 @@
     AudioHwDevice* findAudioHwDeviceByModule(audio_module_handle_t module);
     sp<DeviceHalInterface> findHwDeviceByModule(audio_module_handle_t module);
     void addSoftwarePatchToInsertedModules(
-            audio_module_handle_t module, audio_patch_handle_t handle);
+            audio_module_handle_t module, audio_patch_handle_t handle,
+            const struct audio_patch *patch);
     void removeSoftwarePatchFromInsertedModules(audio_patch_handle_t handle);
     void erasePatch(audio_patch_handle_t handle);
 
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a23d88c..7804822 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -117,6 +117,12 @@
             int         auxEffectId() const { return mAuxEffectId; }
     virtual status_t    getTimestamp(AudioTimestamp& timestamp);
             void        signal();
+            status_t    getDualMonoMode(audio_dual_mono_mode_t* mode);
+            status_t    setDualMonoMode(audio_dual_mono_mode_t mode);
+            status_t    getAudioDescriptionMixLevel(float* leveldB);
+            status_t    setAudioDescriptionMixLevel(float leveldB);
+            status_t    getPlaybackRateParameters(audio_playback_rate_t* playbackRate);
+            status_t    setPlaybackRateParameters(const audio_playback_rate_t& playbackRate);
 
 // implement FastMixerState::VolumeProvider interface
     virtual gain_minifloat_packed_t getVolumeLR();
@@ -281,6 +287,10 @@
     /** How many frames should be in the buffer before the track is considered ready */
     const size_t        mFrameCountToBeReady;
 
+    audio_dual_mono_mode_t mDualMonoMode = AUDIO_DUAL_MONO_MODE_OFF;
+    float               mAudioDescriptionMixLevel = -std::numeric_limits<float>::infinity();
+    audio_playback_rate_t  mPlaybackRateParameters = AUDIO_PLAYBACK_RATE_INITIALIZER;
+
 private:
     void                interceptBuffer(const AudioBufferProvider::Buffer& buffer);
     template <class F>
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 6b37fd0..927d87e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1864,7 +1864,8 @@
         // index 0 is reserved for normal mixer's submix
         mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
         mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
-        mLeftVolFloat(-1.0), mRightVolFloat(-1.0)
+        mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
+        mDownStreamPatch{}
 {
     snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
     mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
@@ -2632,12 +2633,16 @@
     ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
 
     desc->mIoHandle = mId;
+    struct audio_patch patch = mPatch;
+    if (isMsdDevice()) {
+        patch = mDownStreamPatch;
+    }
 
     switch (event) {
     case AUDIO_OUTPUT_OPENED:
     case AUDIO_OUTPUT_REGISTERED:
     case AUDIO_OUTPUT_CONFIG_CHANGED:
-        desc->mPatch = mPatch;
+        desc->mPatch = patch;
         desc->mChannelMask = mChannelMask;
         desc->mSamplingRate = mSampleRate;
         desc->mFormat = mFormat;
@@ -2647,7 +2652,7 @@
         desc->mLatency = latency_l();
         break;
     case AUDIO_CLIENT_STARTED:
-        desc->mPatch = mPatch;
+        desc->mPatch = patch;
         desc->mPortId = portId;
         break;
     case AUDIO_OUTPUT_CLOSED:
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 6b33ad5..709a3cc 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -940,6 +940,11 @@
                                         && outDeviceTypes().count(mTimestampCorrectedDevice) != 0;
                             }
 
+                void setDownStreamPatch(const struct audio_patch *patch) {
+                    Mutex::Autolock _l(mLock);
+                    mDownStreamPatch = *patch;
+                }
+
 protected:
     // updated by readOutputParameters_l()
     size_t                          mNormalFrameCount;  // normal mixer and effects
@@ -1218,6 +1223,10 @@
                 // volumes last sent to audio HAL with stream->setVolume()
                 float mLeftVolFloat;
                 float mRightVolFloat;
+
+                // audio patch used by the downstream software patch.
+                // Only used if ThreadBase::mIsMsdDevice is true.
+                struct audio_patch mDownStreamPatch;
 };
 
 class MixerThread : public PlaybackThread {
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index b17c0bc..ee886d5 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -54,6 +54,60 @@
 
 namespace android {
 
+// Validation methods for input
+namespace {
+
+status_t validateAudioDescriptionMixLevel(float leveldB)
+{
+    constexpr float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
+    return std::isnan(leveldB) || leveldB > MAX_AUDIO_DESCRIPTION_MIX_LEVEL ? BAD_VALUE : OK;
+}
+
+status_t validateDualMonoMode(audio_dual_mono_mode_t dualMonoMode)
+{
+    switch (dualMonoMode) {
+        case AUDIO_DUAL_MONO_MODE_OFF:
+        case AUDIO_DUAL_MONO_MODE_LR:
+        case AUDIO_DUAL_MONO_MODE_LL:
+        case AUDIO_DUAL_MONO_MODE_RR:
+        return OK;
+    }
+    return BAD_VALUE;
+}
+
+status_t validatePlaybackRateFallbackMode(
+        audio_timestretch_fallback_mode_t fallbackMode)
+{
+    switch (fallbackMode) {
+        case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
+            break; // listed so -Wswitch stays clean; rejected below as BAD_VALUE.
+        case AUDIO_TIMESTRETCH_FALLBACK_DEFAULT:
+        case AUDIO_TIMESTRETCH_FALLBACK_MUTE:
+        case AUDIO_TIMESTRETCH_FALLBACK_FAIL:
+            return OK;
+    }
+    return BAD_VALUE;
+}
+
+status_t validatePlaybackRateStretchMode(audio_timestretch_stretch_mode_t stretchMode)
+{
+    switch (stretchMode) {
+        case AUDIO_TIMESTRETCH_STRETCH_DEFAULT:
+        case AUDIO_TIMESTRETCH_STRETCH_VOICE:
+            return OK;
+    }
+    return BAD_VALUE;
+}
+
+status_t validatePlaybackRate(const audio_playback_rate_t& playbackRate)
+{
+    if (playbackRate.mSpeed < 0.f || playbackRate.mPitch < 0.f) return BAD_VALUE;
+    return validatePlaybackRateFallbackMode(playbackRate.mFallbackMode) ?:
+            validatePlaybackRateStretchMode(playbackRate.mStretchMode);
+}
+
+} // namespace
+
 using media::VolumeShaper;
 // ----------------------------------------------------------------------------
 //      TrackBase
@@ -367,12 +421,45 @@
     return mTrack->getTimestamp(timestamp);
 }
 
-
 void AudioFlinger::TrackHandle::signal()
 {
     return mTrack->signal();
 }
 
+status_t AudioFlinger::TrackHandle::getDualMonoMode(audio_dual_mono_mode_t* mode)
+{
+    return mTrack->getDualMonoMode(mode);
+}
+
+status_t AudioFlinger::TrackHandle::setDualMonoMode(audio_dual_mono_mode_t mode)
+{
+    return validateDualMonoMode(mode) ?: mTrack->setDualMonoMode(mode);
+}
+
+status_t AudioFlinger::TrackHandle::getAudioDescriptionMixLevel(float* leveldB)
+{
+    return mTrack->getAudioDescriptionMixLevel(leveldB);
+}
+
+status_t AudioFlinger::TrackHandle::setAudioDescriptionMixLevel(float leveldB)
+{
+    return validateAudioDescriptionMixLevel(leveldB)
+            ?: mTrack->setAudioDescriptionMixLevel(leveldB);
+}
+
+status_t AudioFlinger::TrackHandle::getPlaybackRateParameters(
+        audio_playback_rate_t* playbackRate)
+{
+    return mTrack->getPlaybackRateParameters(playbackRate);
+}
+
+status_t AudioFlinger::TrackHandle::setPlaybackRateParameters(
+        const audio_playback_rate_t& playbackRate)
+{
+    return validatePlaybackRate(playbackRate)
+            ?: mTrack->setPlaybackRateParameters(playbackRate);
+}
+
 status_t AudioFlinger::TrackHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -1456,6 +1543,108 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::getDualMonoMode(audio_dual_mono_mode_t* mode)
+{
+    status_t status = INVALID_OPERATION;
+    if (isOffloadedOrDirect()) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != nullptr) {
+            PlaybackThread *t = (PlaybackThread *)thread.get();
+            Mutex::Autolock _l(t->mLock);
+            status = t->mOutput->stream->getDualMonoMode(mode);
+            ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
+                    "%s: mode %d inconsistent", __func__, mDualMonoMode);
+        }
+    }
+    return status;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::setDualMonoMode(audio_dual_mono_mode_t mode)
+{
+    status_t status = INVALID_OPERATION;
+    if (isOffloadedOrDirect()) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != nullptr) {
+            auto t = static_cast<PlaybackThread *>(thread.get());
+            Mutex::Autolock lock(t->mLock);
+            status = t->mOutput->stream->setDualMonoMode(mode);
+            if (status == NO_ERROR) {
+                mDualMonoMode = mode;
+            }
+        }
+    }
+    return status;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::getAudioDescriptionMixLevel(float* leveldB)
+{
+    status_t status = INVALID_OPERATION;
+    if (isOffloadedOrDirect()) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != nullptr) {
+            auto t = static_cast<PlaybackThread *>(thread.get());
+            Mutex::Autolock lock(t->mLock);
+            status = t->mOutput->stream->getAudioDescriptionMixLevel(leveldB);
+            ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
+                    "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
+        }
+    }
+    return status;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::setAudioDescriptionMixLevel(float leveldB)
+{
+    status_t status = INVALID_OPERATION;
+    if (isOffloadedOrDirect()) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != nullptr) {
+            auto t = static_cast<PlaybackThread *>(thread.get());
+            Mutex::Autolock lock(t->mLock);
+            status = t->mOutput->stream->setAudioDescriptionMixLevel(leveldB);
+            if (status == NO_ERROR) {
+                mAudioDescriptionMixLevel = leveldB;
+            }
+        }
+    }
+    return status;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::getPlaybackRateParameters(
+        audio_playback_rate_t* playbackRate)
+{
+    status_t status = INVALID_OPERATION;
+    if (isOffloadedOrDirect()) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != nullptr) {
+            auto t = static_cast<PlaybackThread *>(thread.get());
+            Mutex::Autolock lock(t->mLock);
+            status = t->mOutput->stream->getPlaybackRateParameters(playbackRate);
+            ALOGD_IF((status == NO_ERROR) &&
+                    !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
+                    "%s: playbackRate inconsistent", __func__);
+        }
+    }
+    return status;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::setPlaybackRateParameters(
+        const audio_playback_rate_t& playbackRate)
+{
+    status_t status = INVALID_OPERATION;
+    if (isOffloadedOrDirect()) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != nullptr) {
+            auto t = static_cast<PlaybackThread *>(thread.get());
+            Mutex::Autolock lock(t->mLock);
+            status = t->mOutput->stream->setPlaybackRateParameters(playbackRate);
+            if (status == NO_ERROR) {
+                mPlaybackRateParameters = playbackRate;
+            }
+        }
+    }
+    return status;
+}
+
 //To be called with thread lock held
 bool AudioFlinger::PlaybackThread::Track::isResumePending() {
 
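The TrackHandle setters and validatePlaybackRate() above chain validation and forwarding with the GCC/Clang ?: (Elvis) extension: a ?: b yields a when a is non-zero (here, a non-OK status) and b otherwise, so the first failing check short-circuits the call to the track. A minimal standalone illustration of the pattern (hypothetical names; requires GCC or Clang for the extension):

    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;
    constexpr status_t BAD_VALUE = -22;

    static status_t validateSpeed(float speed) { return speed < 0.f ? BAD_VALUE : OK; }
    static status_t applySpeed(float speed) { std::printf("applying %f\n", speed); return OK; }

    static status_t setSpeed(float speed) {
        // Returns the validation error if there is one, otherwise the result of applySpeed().
        return validateSpeed(speed) ?: applySpeed(speed);
    }

    int main() {
        std::printf("%d\n", setSpeed(1.5f));   // validates, applies, prints 0
        std::printf("%d\n", setSpeed(-1.0f));  // rejected with BAD_VALUE, never applied
        return 0;
    }
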
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 25f7c27..5c47d1b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -505,6 +505,9 @@
         lConfig.offload_info.duration_us = -1;
         lConfig.offload_info.has_video = true; // conservative
         lConfig.offload_info.is_streaming = true; // likely
+        // Carry over the caller-requested encapsulation parameters (reset above).
+        if (config != nullptr) {
+            lConfig.offload_info.encapsulation_mode = config->offload_info.encapsulation_mode;
+            lConfig.offload_info.content_id = config->offload_info.content_id;
+            lConfig.offload_info.sync_id = config->offload_info.sync_id;
+        }
     }
 
     mFlags = (audio_output_flags_t)(mFlags | flags);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index a192083..e1d806d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1037,11 +1037,9 @@
     *output = AUDIO_IO_HANDLE_NONE;
     if (!msdDevices.isEmpty()) {
         *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
-        sp<DeviceDescriptor> device = outputDevices.isEmpty() ? nullptr : outputDevices.itemAt(0);
-        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatches(&outputDevices) == NO_ERROR) {
             ALOGV("%s() Using MSD devices %s instead of devices %s",
                   __func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
-            outputDevices = msdDevices;
         } else {
             *output = AUDIO_IO_HANDLE_NONE;
         }
@@ -1055,6 +1053,12 @@
     }
 
     *selectedDeviceId = getFirstDeviceId(outputDevices);
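+    // If the default output device is among the selected output devices, report it as the
+    // selected device.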
+    for (auto &outputDevice : outputDevices) {
+        if (outputDevice->getId() == getConfig().getDefaultOutputDevice()->getId()) {
+            *selectedDeviceId = outputDevice->getId();
+            break;
+        }
+    }
 
     if (outputDevices.onlyContainsDevicesWithType(AUDIO_DEVICE_OUT_TELEPHONY_TX)) {
         *outputType = API_OUTPUT_TELEPHONY_TX;
@@ -1197,24 +1201,9 @@
     sp<SwAudioOutputDescriptor> outputDesc =
             new SwAudioOutputDescriptor(profile, mpClientInterface);
 
-    String8 address = getFirstDeviceAddress(devices);
-
-    // MSD patch may be using the only output stream that can service this request. Release
-    // MSD patch to prioritize this request over any active output on MSD.
-    AudioPatchCollection msdPatches = getMsdPatches();
-    for (size_t i = 0; i < msdPatches.size(); i++) {
-        const auto& patch = msdPatches[i];
-        for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
-            const struct audio_port_config *sink = &patch->mPatch.sinks[j];
-            if (sink->type == AUDIO_PORT_TYPE_DEVICE &&
-                    devices.containsDeviceWithType(sink->ext.device.type) &&
-                    (address.isEmpty() || strncmp(sink->ext.device.address, address.string(),
-                            AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
-                releaseAudioPatch(patch->getHandle(), mUidCached);
-                break;
-            }
-        }
-    }
+    // An MSD patch may be using the only output stream that can service this request. Release
+    // the MSD patches routed to the requested devices to prioritize this request over any
+    // active output on MSD.
+    releaseMsdPatches(devices);
 
     status_t status = outputDesc->open(config, devices, stream, flags, output);
 
@@ -1387,7 +1376,8 @@
     }
     AudioProfileVector deviceProfiles;
     for (const auto &outProfile : outputProfiles) {
-        if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
+        if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) &&
+                outProfile->supportsDevice(outputDevice)) {
             appendAudioProfiles(deviceProfiles, outProfile->getAudioProfiles());
         }
     }
@@ -1455,40 +1445,85 @@
     return patchBuilder;
 }
 
-status_t AudioPolicyManager::setMsdPatch(const sp<DeviceDescriptor> &outputDevice) {
-    sp<DeviceDescriptor> device = outputDevice;
-    if (device == nullptr) {
+status_t AudioPolicyManager::setMsdPatches(const DeviceVector *outputDevices) {
+    DeviceVector devices;
+    if (outputDevices != nullptr && outputDevices->size() > 0) {
+        devices.add(*outputDevices);
+    } else {
         // Use media strategy for unspecified output device. This should only
         // occur on checkForDeviceAndOutputChanges(). Device connection events may
         // therefore invalidate explicit routing requests.
-        DeviceVector devices = mEngine->getOutputDevicesForAttributes(
+        devices = mEngine->getOutputDevicesForAttributes(
                     attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/);
-        LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no outpudevice to set Msd Patch");
-        device = devices.itemAt(0);
+        LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no output device to set MSD patch");
     }
-    ALOGV("%s() for device %s", __func__, device->toString().c_str());
-    PatchBuilder patchBuilder = buildMsdPatch(device);
-    const struct audio_patch* patch = patchBuilder.patch();
-    const AudioPatchCollection msdPatches = getMsdPatches();
-    if (!msdPatches.isEmpty()) {
-        LOG_ALWAYS_FATAL_IF(msdPatches.size() > 1,
-                "The current MSD prototype only supports one output patch");
-        sp<AudioPatch> currentPatch = msdPatches.valueAt(0);
-        if (audio_patches_are_equal(&currentPatch->mPatch, patch)) {
-            return NO_ERROR;
+    std::vector<PatchBuilder> patchesToCreate;
+    for (auto i = 0u; i < devices.size(); ++i) {
+        ALOGV("%s() for device %s", __func__, devices[i]->toString().c_str());
+        patchesToCreate.push_back(buildMsdPatch(devices[i]));
+    }
+    // Retain only the MSD patches associated with the requested output devices.
+    // Tear down the others, and create new ones as needed.
+    AudioPatchCollection patchesToRemove = getMsdPatches();
+    for (auto it = patchesToCreate.begin(); it != patchesToCreate.end(); ) {
+        auto retainedPatch = false;
+        for (auto i = 0u; i < patchesToRemove.size(); ++i) {
+            if (audio_patches_are_equal(it->patch(), &patchesToRemove[i]->mPatch)) {
+                patchesToRemove.removeItemsAt(i);
+                retainedPatch = true;
+                break;
+            }
         }
+        if (retainedPatch) {
+            it = patchesToCreate.erase(it);
+            continue;
+        }
+        ++it;
+    }
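+    // At this point patchesToCreate only contains patches that are not installed yet, and
+    // patchesToRemove only contains installed MSD patches that are no longer needed.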
+    if (patchesToCreate.size() == 0 && patchesToRemove.size() == 0) {
+        return NO_ERROR;
+    }
+    for (auto i = 0u; i < patchesToRemove.size(); ++i) {
+        auto &currentPatch = patchesToRemove.valueAt(i);
         releaseAudioPatch(currentPatch->getHandle(), mUidCached);
     }
-    status_t status = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
-            patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
-    ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status);
-    ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to "
-           "device:%s (format:%#x channels:%#x samplerate:%d)", __func__,
-             device->toString().c_str(), patch->sources[0].format,
-             patch->sources[0].channel_mask, patch->sources[0].sample_rate);
+    status_t status = NO_ERROR;
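+    // Install the remaining patches; remember the first failure status but attempt them all.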
+    for (const auto &p : patchesToCreate) {
+        auto currStatus = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
+                p.patch(), 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
+        char message[256];
+        snprintf(message, sizeof(message), "%s() %s: creating MSD patch from device:IN_BUS to "
+            "device:%#x (format:%#x channels:%#x samplerate:%d)", __func__,
+                currStatus == NO_ERROR ? "Success" : "Error",
+                p.patch()->sinks[0].ext.device.type, p.patch()->sources[0].format,
+                p.patch()->sources[0].channel_mask, p.patch()->sources[0].sample_rate);
+        if (currStatus == NO_ERROR) {
+            ALOGD("%s", message);
+        } else {
+            ALOGE("%s", message);
+            if (status == NO_ERROR) {
+                status = currStatus;
+            }
+        }
+    }
     return status;
 }
 
+void AudioPolicyManager::releaseMsdPatches(const DeviceVector& devices) {
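+    // Release every installed MSD patch whose sink device matches one of the given devices
+    // by type and address.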
+    AudioPatchCollection msdPatches = getMsdPatches();
+    for (size_t i = 0; i < msdPatches.size(); i++) {
+        const auto& patch = msdPatches[i];
+        for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
+            const struct audio_port_config *sink = &patch->mPatch.sinks[j];
+            if (sink->type == AUDIO_PORT_TYPE_DEVICE && devices.getDevice(sink->ext.device.type,
+                    String8(sink->ext.device.address), AUDIO_FORMAT_DEFAULT) != nullptr) {
+                releaseAudioPatch(patch->getHandle(), mUidCached);
+                break;
+            }
+        }
+    }
+}
+
 audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                                        audio_output_flags_t flags,
                                                        audio_format_t format,
@@ -5310,8 +5345,13 @@
             }
         }
         if (!directOutputOpen) {
-            ALOGV("no direct outputs open, reset MSD patch");
-            setMsdPatch();
+            ALOGV("no direct outputs open, reset MSD patches");
+            // TODO: The MSD patches to be established here may differ from the current MSD
+            // patches due to how output devices for patching are resolved. Avoid this by caching
+            // and reusing the arguments to mEngine->getOutputDevicesForAttributes() when
+            // resolving which output devices to patch to. This may be complicated by the fact
+            // that devices may become unavailable.
+            setMsdPatches();
         }
     }
 }
@@ -5378,7 +5418,13 @@
     if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
     updateDevicesAndOutputs();
     if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
-        setMsdPatch();
+        // TODO: The MSD patches to be established here may differ from the current MSD patches
+        // due to how output devices for patching are resolved. Nevertheless, AudioTracks affected
+        // by device configuration changes will ultimately be rerouted correctly. We can still
+        // avoid unnecessary rerouting by caching and reusing the arguments to
+        // mEngine->getOutputDevicesForAttributes() when resolving which output devices to patch
+        // to. This may be complicated by the fact that devices may become unavailable.
+        setMsdPatches();
     }
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 33639cd..c1c483c 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -844,13 +844,6 @@
         // end point.
         audio_port_handle_t mCallRxSourceClientPort = AUDIO_PORT_HANDLE_NONE;
 
-private:
-        void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
-
-        // Add or remove AC3 DTS encodings based on user preferences.
-        void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
-        void modifySurroundChannelMasks(ChannelMaskSet *channelMasksPtr);
-
         // Support for Multi-Stream Decoder (MSD) module
         sp<DeviceDescriptor> getMsdAudioInDevice() const;
         DeviceVector getMsdAudioOutDevices() const;
@@ -860,7 +853,14 @@
                                            audio_port_config *sourceConfig,
                                            audio_port_config *sinkConfig) const;
         PatchBuilder buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const;
-        status_t setMsdPatch(const sp<DeviceDescriptor> &outputDevice = nullptr);
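+        // Install MSD patches towards the given output devices, or towards the devices
+        // selected by the media strategy when outputDevices is null. Already matching
+        // patches are kept; stale MSD patches are released.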
+        status_t setMsdPatches(const DeviceVector *outputDevices = nullptr);
+        void releaseMsdPatches(const DeviceVector& devices);
+private:
+        void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
+
+        // Add or remove AC3 DTS encodings based on user preferences.
+        void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
+        void modifySurroundChannelMasks(ChannelMaskSet *channelMasksPtr);
 
         // If any, resolve any "dynamic" fields of an Audio Profiles collection
         void updateAudioProfiles(const sp<DeviceDescriptor>& devDesc, audio_io_handle_t ioHandle,
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index 8bab020..c096427 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -29,6 +29,8 @@
     using AudioPolicyManager::getOutputs;
     using AudioPolicyManager::getAvailableOutputDevices;
     using AudioPolicyManager::getAvailableInputDevices;
+    using AudioPolicyManager::releaseMsdPatches;
+    using AudioPolicyManager::setMsdPatches;
     uint32_t getAudioPortGeneration() const { return mAudioPortGeneration; }
 };
 
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index ca2164b..f391606 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -319,15 +319,44 @@
 
 // TODO: Add patch creation tests that involve already existing patch
 
-class AudioPolicyManagerTestMsd : public AudioPolicyManagerTest {
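+// Parameters for the MSD tests: the number of MSD audio patches expected to be created,
+// and a name suffix used for the parameterized test instantiation.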
+enum
+{
+    MSD_AUDIO_PATCH_COUNT_NUM_AUDIO_PATCHES_INDEX = 0,
+    MSD_AUDIO_PATCH_COUNT_NAME_INDEX = 1
+};
+using MsdAudioPatchCountSpecification = std::tuple<size_t, std::string>;
+
+class AudioPolicyManagerTestMsd : public AudioPolicyManagerTest,
+        public ::testing::WithParamInterface<MsdAudioPatchCountSpecification> {
+  public:
+    AudioPolicyManagerTestMsd();
   protected:
     void SetUpManagerConfig() override;
     void TearDown() override;
 
     sp<DeviceDescriptor> mMsdOutputDevice;
     sp<DeviceDescriptor> mMsdInputDevice;
+    sp<DeviceDescriptor> mDefaultOutputDevice;
+
+    const size_t mExpectedAudioPatchCount;
+    sp<DeviceDescriptor> mSpdifDevice;
 };
 
+AudioPolicyManagerTestMsd::AudioPolicyManagerTestMsd()
+    : mExpectedAudioPatchCount(std::get<MSD_AUDIO_PATCH_COUNT_NUM_AUDIO_PATCHES_INDEX>(
+            GetParam())) {}
+
+INSTANTIATE_TEST_CASE_P(
+        MsdAudioPatchCount,
+        AudioPolicyManagerTestMsd,
+        ::testing::Values(
+                MsdAudioPatchCountSpecification(1u, "single"),
+                MsdAudioPatchCountSpecification(2u, "dual")
+        ),
+        [](const ::testing::TestParamInfo<MsdAudioPatchCountSpecification> &info) {
+                return std::get<MSD_AUDIO_PATCH_COUNT_NAME_INDEX>(info.param); }
+);
+
 void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
     // TODO: Consider using Serializer to load part of the config from a string.
     AudioPolicyManagerTest::SetUpManagerConfig();
@@ -347,6 +376,19 @@
     config.addDevice(mMsdOutputDevice);
     config.addDevice(mMsdInputDevice);
 
+    if (mExpectedAudioPatchCount == 2) {
+        // Add SPDIF device with PCM output profile as a second device for dual MSD audio patching.
+        mSpdifDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPDIF);
+        mSpdifDevice->addAudioProfile(pcmOutputProfile);
+        config.addDevice(mSpdifDevice);
+
+        sp<OutputProfile> spdifOutputProfile = new OutputProfile("spdif output");
+        spdifOutputProfile->addAudioProfile(pcmOutputProfile);
+        spdifOutputProfile->addSupportedDevice(mSpdifDevice);
+        config.getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
+                addOutputProfile(spdifOutputProfile);
+    }
+
     sp<HwModule> msdModule = new HwModule(AUDIO_HARDWARE_MODULE_ID_MSD, 2 /*halVersionMajor*/);
     HwModuleCollection modules = config.getHwModules();
     modules.add(msdModule);
@@ -380,62 +422,90 @@
     primaryEncodedOutputProfile->addSupportedDevice(config.getDefaultOutputDevice());
     config.getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
             addOutputProfile(primaryEncodedOutputProfile);
+
+    mDefaultOutputDevice = config.getDefaultOutputDevice();
+    if (mExpectedAudioPatchCount == 2) {
+        mSpdifDevice->addAudioProfile(dtsOutputProfile);
+        primaryEncodedOutputProfile->addSupportedDevice(mSpdifDevice);
+    }
 }
 
 void AudioPolicyManagerTestMsd::TearDown() {
     mMsdOutputDevice.clear();
     mMsdInputDevice.clear();
+    mDefaultOutputDevice.clear();
+    mSpdifDevice.clear();
     AudioPolicyManagerTest::TearDown();
 }
 
-TEST_F(AudioPolicyManagerTestMsd, InitSuccess) {
+TEST_P(AudioPolicyManagerTestMsd, InitSuccess) {
     ASSERT_TRUE(mMsdOutputDevice);
     ASSERT_TRUE(mMsdInputDevice);
+    ASSERT_TRUE(mDefaultOutputDevice);
 }
 
-TEST_F(AudioPolicyManagerTestMsd, Dump) {
+TEST_P(AudioPolicyManagerTestMsd, Dump) {
     dumpToLog();
 }
 
-TEST_F(AudioPolicyManagerTestMsd, PatchCreationOnSetForceUse) {
+TEST_P(AudioPolicyManagerTestMsd, PatchCreationOnSetForceUse) {
     const PatchCountCheck patchCount = snapshotPatchCount();
     mManager->setForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND,
             AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
-    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
 }
 
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
+TEST_P(AudioPolicyManagerTestMsd, PatchCreationSetReleaseMsdPatches) {
+    const PatchCountCheck patchCount = snapshotPatchCount();
+    DeviceVector devices = mManager->getAvailableOutputDevices();
+    // Remove MSD output device to avoid patching to itself
+    devices.remove(mMsdOutputDevice);
+    ASSERT_EQ(mExpectedAudioPatchCount, devices.size());
+    mManager->setMsdPatches(&devices);
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
+    // Dual patch: exercise creating one new audio patch and reusing another existing audio patch.
+    DeviceVector singleDevice(devices[0]);
+    mManager->releaseMsdPatches(singleDevice);
+    ASSERT_EQ(mExpectedAudioPatchCount - 1, patchCount.deltaFromSnapshot());
+    mManager->setMsdPatches(&devices);
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
+    mManager->releaseMsdPatches(devices);
+    ASSERT_EQ(0, patchCount.deltaFromSnapshot());
+}
+
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
-    ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+    ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
 }
 
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
-    ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+    ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
 }
 
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
-    ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+    ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
+    selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
-    ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-    ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+    ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
+    ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
 }
 
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
@@ -444,7 +514,7 @@
     ASSERT_EQ(0, patchCount.deltaFromSnapshot());
 }
 
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrFormatSwitching) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrFormatSwitching) {
     // Switch between formats that are supported and not supported by MSD.
     {
         const PatchCountCheck patchCount = snapshotPatchCount();
@@ -453,10 +523,10 @@
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
                 nullptr /*output*/, &portId);
-        ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
-        ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+        ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
+        ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
         mManager->releaseOutput(portId);
-        ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+        ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
     }
     {
         const PatchCountCheck patchCount = snapshotPatchCount();
@@ -466,7 +536,7 @@
                 AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
                 nullptr /*output*/, &portId);
         ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
-        ASSERT_EQ(-1, patchCount.deltaFromSnapshot());
+        ASSERT_EQ(-static_cast<int>(mExpectedAudioPatchCount), patchCount.deltaFromSnapshot());
         mManager->releaseOutput(portId);
         ASSERT_EQ(0, patchCount.deltaFromSnapshot());
     }
@@ -475,7 +545,7 @@
         audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
-        ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
+        ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
         ASSERT_EQ(0, patchCount.deltaFromSnapshot());
     }
 }