Merge "aaudio: use unique_ptr for mixer buffer"
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index a16b106..3780a5a 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -52,6 +52,9 @@
     // directly in the main device manifest.xml file or via vintf_fragments.
     // (Remove the line below if the entry is already in the main manifest.)
     vintf_fragments: ["manifest_media_c2_V1_1_default.xml"],
+
+    // Remove the line below to enable this module.
+    enabled: false,
 }
 
 // seccomp policy file.
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 80bd093..8d374c9 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -234,17 +234,20 @@
     static_assert(NCHAN > 0 && NCHAN <= 8);
     static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
             || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
-            || MIXTYPE == MIXTYPE_STEREOEXPAND);
+            || MIXTYPE == MIXTYPE_STEREOEXPAND
+            || MIXTYPE == MIXTYPE_MONOEXPAND);
     auto proc = [](auto& a, const auto& b) {
         if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
-                || MIXTYPE == MIXTYPE_STEREOEXPAND) {
+                || MIXTYPE == MIXTYPE_STEREOEXPAND
+                || MIXTYPE == MIXTYPE_MONOEXPAND) {
             a += b;
         } else {
             a = b;
         }
     };
     auto inp = [&in]() -> const TI& {
-        if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) {
+        if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND
+                || MIXTYPE == MIXTYPE_MONOEXPAND) {
             return *in;
         } else {
             return *in++;
@@ -312,6 +315,8 @@
  *   TV/TAV: int32_t (U4.28) or int16_t (U4.12) or float
  *   Input channel count is 1.
  *   vol: represents volume array.
+ *   This uses the stereo balanced volumes vol[0] and vol[1].
+ *   Before R, this was a full volume array, but it was only called for channels <= 2.
  *
  *   This accumulates into the out pointer.
  *
@@ -356,17 +361,13 @@
         do {
             TA auxaccum = 0;
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                     vol[i] += volinc[i];
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
-                    vol[i] += volinc[i];
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                     vol[i] += volinc[i];
@@ -383,11 +384,13 @@
                 vol[0] += volinc[0];
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(
                         out, in, vol, [&auxaccum] (auto &a, const auto &b) {
                     return MixMulAux<TO, TI, TV, TA>(a, b, &auxaccum);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
                 vol[0] += volinc[0];
                 vol[1] += volinc[1];
@@ -401,17 +404,13 @@
     } else {
         do {
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                     vol[i] += volinc[i];
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
-                    vol[i] += volinc[i];
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
                     vol[i] += volinc[i];
@@ -428,10 +427,12 @@
                 vol[0] += volinc[0];
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(out, in, vol, [] (auto &a, const auto &b) {
                     return MixMul<TO, TI, TV>(a, b);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
                 vol[0] += volinc[0];
                 vol[1] += volinc[1];
@@ -454,15 +455,12 @@
         do {
             TA auxaccum = 0;
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                 }
@@ -476,11 +474,13 @@
                 }
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(
                         out, in, vol, [&auxaccum] (auto &a, const auto &b) {
                     return MixMulAux<TO, TI, TV, TA>(a, b, &auxaccum);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
             } else /* constexpr */ {
                 static_assert(dependent_false<MIXTYPE>, "invalid mixtype");
@@ -490,16 +490,14 @@
         } while (--frameCount);
     } else {
         do {
+            // ALOGD("Mixtype:%d NCHAN:%d", MIXTYPE, NCHAN);
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
                 }
@@ -513,10 +511,12 @@
                 }
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(out, in, vol, [] (auto &a, const auto &b) {
                     return MixMul<TO, TI, TV>(a, b);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
             } else /* constexpr */ {
                 static_assert(dependent_false<MIXTYPE>, "invalid mixtype");
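For reference, a simplified standalone sketch (not part of the patch) of what the MIXTYPE_MONOEXPAND path computes per frame after this change: the single mono input sample is accumulated into every output channel and the input advances by exactly one sample per frame (the new "in += 1" above). The per-channel volume choice here is an assumption for illustration; the real selection is done by stereoVolumeHelper based on the channel's spatial position.

// Standalone sketch, assuming float samples and a simplified vol[0]/vol[1] mapping.
template <int NCHAN>
void monoExpandFrame(float*& out, const float*& in, const float vol[2]) {
    for (int i = 0; i < NCHAN; ++i) {
        // Simplification: even channels take vol[0], odd channels vol[1]; the real
        // stereoVolumeHelper derives the volume from the channel position.
        *out++ += *in * vol[i & 1];
    }
    in += 1;  // mono source: one input sample consumed per output frame
}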
diff --git a/media/libaudioprocessing/tests/mixerops_benchmark.cpp b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
index 86f5429..7a4c5c7 100644
--- a/media/libaudioprocessing/tests/mixerops_benchmark.cpp
+++ b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
@@ -74,28 +74,32 @@
     }
 }
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 2);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 2);
+// MIXTYPE_MULTI and MIXTYPE_MULTI_SAVEONLY are not used by AudioMixer for more than 2 channels,
+// which is now enforced by a static_assert (those configurations will not compile).
+// So we benchmark MIXTYPE_MULTI_MONOVOL and MIXTYPE_MULTI_SAVEONLY_MONOVOL against
+// MIXTYPE_MULTI_STEREOVOL and MIXTYPE_MULTI_SAVEONLY_STEREOVOL instead.
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 2);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 2);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 2);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 2);
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 4);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 4);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 4);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 4);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 4);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 4);
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 5);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 5);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 5);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 5);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 5);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 5);
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 8);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 8);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 8);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 8);
 
-BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI, 8);
-BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_SAVEONLY, 8);
+BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_MONOVOL, 8);
+BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_STEREOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 8);
 
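The benchmark comment above refers to the new static_assert guards in AudioMixerOps.h. A small illustration of the mechanism, with hypothetical names not taken from the patch: a template-parameter-dependent static_assert inside an "if constexpr" branch is only checked when that branch is instantiated, so MIXTYPE_MULTI and MIXTYPE_MULTI_SAVEONLY with more than 2 channels fail to compile while the other mix types are unaffected.

// Illustration only: the assert fires solely for the instantiation that takes the branch.
enum MixKind { kMulti, kStereoVol };

template <MixKind KIND, int NCHAN>
void mixSketch() {
    if constexpr (KIND == kMulti) {
        static_assert(NCHAN <= 2, "multi-volume mixing is limited to mono/stereo");
        // ... per-channel volume loop would go here ...
    } else {
        // ... stereo-balanced volume path, any NCHAN ...
    }
}

// mixSketch<kMulti, 2>();      // OK
// mixSketch<kStereoVol, 8>();  // OK: the kMulti branch is discarded, assert not evaluated
// mixSketch<kMulti, 8>();      // fails to compile, as intended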
diff --git a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
index b485826..e3996b0 100644
--- a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
@@ -94,6 +94,7 @@
     if (mState == STARTED) {
         abortTranscodeLoop();
         mTranscodingThread.join();
+        mOutputQueue->abort();  // Wake up any threads waiting for samples.
         mState = STOPPED;
         return true;
     }
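The new mOutputQueue->abort() call in stop() relies on the sample queue waking any consumers blocked in dequeue(). A minimal sketch of that pattern, assuming a condition-variable-based queue; this is not the actual MediaSampleQueue implementation, but it mirrors its convention that dequeue() returns true once the queue has been aborted.

#include <condition_variable>
#include <mutex>
#include <queue>

template <typename T>
class AbortableQueue {
public:
    bool dequeue(T* out) {               // returns true if the queue was aborted
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mAborted || !mQueue.empty(); });
        if (mAborted) return true;
        *out = std::move(mQueue.front());
        mQueue.pop();
        return false;
    }

    void abort() {
        std::lock_guard<std::mutex> lock(mMutex);
        mAborted = true;
        mCondition.notify_all();         // wake every blocked consumer
    }

private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::queue<T> mQueue;
    bool mAborted = false;
};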
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index 49bfdfe..ed702db 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -229,11 +229,11 @@
         return AMEDIA_ERROR_INVALID_PARAMETER;
     }
 
-    std::unique_ptr<MediaTrackTranscoder> transcoder = nullptr;
-    std::shared_ptr<AMediaFormat> format = nullptr;
+    std::shared_ptr<MediaTrackTranscoder> transcoder;
+    std::shared_ptr<AMediaFormat> format;
 
     if (trackFormat == nullptr) {
-        transcoder = std::make_unique<PassthroughTrackTranscoder>(shared_from_this());
+        transcoder = std::make_shared<PassthroughTrackTranscoder>(shared_from_this());
     } else {
         const char* srcMime = nullptr;
         if (!AMediaFormat_getString(mSourceTrackFormats[trackIndex].get(), AMEDIAFORMAT_KEY_MIME,
@@ -258,7 +258,7 @@
             }
         }
 
-        transcoder = std::make_unique<VideoTrackTranscoder>(shared_from_this());
+        transcoder = VideoTrackTranscoder::create(shared_from_this());
 
         AMediaFormat* mergedFormat =
                 mergeMediaFormats(mSourceTrackFormats[trackIndex].get(), trackFormat);
diff --git a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
index a4cbf33..e2cc6b6 100644
--- a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
@@ -44,8 +44,17 @@
     // Check if the free list contains a large enough buffer.
     auto it = mFreeBufferMap.lower_bound(minimumBufferSize);
     if (it != mFreeBufferMap.end()) {
+        uint8_t* buffer = it->second;
         mFreeBufferMap.erase(it);
-        return it->second;
+        return buffer;
+    }
+
+    // If the maximum buffer count is reached, remove an existing free buffer.
+    if (mAddressSizeMap.size() >= mMaxBufferCount) {
+        auto it = mFreeBufferMap.begin();
+        mAddressSizeMap.erase(it->second);
+        delete[] it->second;
+        mFreeBufferMap.erase(it);
     }
 
     // Allocate a new buffer.
@@ -55,14 +64,6 @@
         return nullptr;
     }
 
-    // If the maximum buffer count is reached, remove an existing free buffer.
-    if (mAddressSizeMap.size() >= mMaxBufferCount) {
-        auto it = mFreeBufferMap.begin();
-        mFreeBufferMap.erase(it);
-        mAddressSizeMap.erase(it->second);
-        delete[] it->second;
-    }
-
     // Add the buffer to the tracking set.
     mAddressSizeMap.emplace(buffer, minimumBufferSize);
     return buffer;
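The reordering in BufferPool::getBufferWithSize above fixes a read through an erased iterator and evicts before allocating so the pool never exceeds its cap, even transiently. A standalone sketch of the corrected lookup, with illustrative names (the real free list is the size-keyed mFreeBufferMap): erase(it) invalidates the iterator, so the mapped pointer must be copied out first.

#include <cstddef>
#include <cstdint>
#include <map>

uint8_t* takeFreeBuffer(std::multimap<size_t, uint8_t*>& freeBuffers, size_t minimumSize) {
    auto it = freeBuffers.lower_bound(minimumSize);
    if (it == freeBuffers.end()) return nullptr;
    uint8_t* buffer = it->second;  // copy before erase; it->second after erase is UB
    freeBuffers.erase(it);
    return buffer;
}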
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index 8ee252f..65dcad3 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -59,36 +59,70 @@
     return value;
 }
 
+// The CodecWrapper class lets AMediaCodec instances outlive the transcoder object itself by
+// giving the codec only a weak pointer to the transcoder. Codecs wrapped in this object are
+// kept alive by the transcoder and by the codec's outstanding buffers. Once the transcoder stops
+// and all output buffers have been released by downstream components, the codec is also released.
+class VideoTrackTranscoder::CodecWrapper {
+public:
+    CodecWrapper(AMediaCodec* codec, const std::weak_ptr<VideoTrackTranscoder>& transcoder)
+          : mCodec(codec), mTranscoder(transcoder), mCodecStarted(false) {}
+    ~CodecWrapper() {
+        if (mCodecStarted) {
+            AMediaCodec_stop(mCodec);
+        }
+        AMediaCodec_delete(mCodec);
+    }
+
+    AMediaCodec* getCodec() { return mCodec; }
+    std::shared_ptr<VideoTrackTranscoder> getTranscoder() const { return mTranscoder.lock(); };
+    void setStarted() { mCodecStarted = true; }
+
+private:
+    AMediaCodec* mCodec;
+    std::weak_ptr<VideoTrackTranscoder> mTranscoder;
+    bool mCodecStarted;
+};
+
 // Dispatch responses to codec callbacks onto the message queue.
 struct AsyncCodecCallbackDispatch {
     static void onAsyncInputAvailable(AMediaCodec* codec, void* userdata, int32_t index) {
-        VideoTrackTranscoder* transcoder = static_cast<VideoTrackTranscoder*>(userdata);
-        if (codec == transcoder->mDecoder) {
-            transcoder->mCodecMessageQueue.push(
-                    [transcoder, index] { transcoder->enqueueInputSample(index); });
+        VideoTrackTranscoder::CodecWrapper* wrapper =
+                static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+        if (auto transcoder = wrapper->getTranscoder()) {
+            if (codec == transcoder->mDecoder) {
+                transcoder->mCodecMessageQueue.push(
+                        [transcoder, index] { transcoder->enqueueInputSample(index); });
+            }
         }
     }
 
     static void onAsyncOutputAvailable(AMediaCodec* codec, void* userdata, int32_t index,
                                        AMediaCodecBufferInfo* bufferInfoPtr) {
-        VideoTrackTranscoder* transcoder = static_cast<VideoTrackTranscoder*>(userdata);
+        VideoTrackTranscoder::CodecWrapper* wrapper =
+                static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
         AMediaCodecBufferInfo bufferInfo = *bufferInfoPtr;
-        transcoder->mCodecMessageQueue.push([transcoder, index, codec, bufferInfo] {
-            if (codec == transcoder->mDecoder) {
-                transcoder->transferBuffer(index, bufferInfo);
-            } else if (codec == transcoder->mEncoder.get()) {
-                transcoder->dequeueOutputSample(index, bufferInfo);
-            }
-        });
+        if (auto transcoder = wrapper->getTranscoder()) {
+            transcoder->mCodecMessageQueue.push([transcoder, index, codec, bufferInfo] {
+                if (codec == transcoder->mDecoder) {
+                    transcoder->transferBuffer(index, bufferInfo);
+                } else if (codec == transcoder->mEncoder->getCodec()) {
+                    transcoder->dequeueOutputSample(index, bufferInfo);
+                }
+            });
+        }
     }
 
     static void onAsyncFormatChanged(AMediaCodec* codec, void* userdata, AMediaFormat* format) {
-        VideoTrackTranscoder* transcoder = static_cast<VideoTrackTranscoder*>(userdata);
-        const char* kCodecName = (codec == transcoder->mDecoder ? "Decoder" : "Encoder");
-        LOG(DEBUG) << kCodecName << " format changed: " << AMediaFormat_toString(format);
-        if (codec == transcoder->mEncoder.get()) {
-            transcoder->mCodecMessageQueue.push(
-                    [transcoder, format] { transcoder->updateTrackFormat(format); });
+        VideoTrackTranscoder::CodecWrapper* wrapper =
+                static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+        if (auto transcoder = wrapper->getTranscoder()) {
+            const char* kCodecName = (codec == transcoder->mDecoder ? "Decoder" : "Encoder");
+            LOG(DEBUG) << kCodecName << " format changed: " << AMediaFormat_toString(format);
+            if (codec == transcoder->mEncoder->getCodec()) {
+                transcoder->mCodecMessageQueue.push(
+                        [transcoder, format] { transcoder->updateTrackFormat(format); });
+            }
         }
     }
 
@@ -96,16 +130,25 @@
                              int32_t actionCode, const char* detail) {
         LOG(ERROR) << "Error from codec " << codec << ", userdata " << userdata << ", error "
                    << error << ", action " << actionCode << ", detail " << detail;
-        VideoTrackTranscoder* transcoder = static_cast<VideoTrackTranscoder*>(userdata);
-        transcoder->mCodecMessageQueue.push(
-                [transcoder, error] {
-                    transcoder->mStatus = error;
-                    transcoder->mStopRequested = true;
-                },
-                true);
+        VideoTrackTranscoder::CodecWrapper* wrapper =
+                static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+        if (auto transcoder = wrapper->getTranscoder()) {
+            transcoder->mCodecMessageQueue.push(
+                    [transcoder, error] {
+                        transcoder->mStatus = error;
+                        transcoder->mStopRequested = true;
+                    },
+                    true);
+        }
     }
 };
 
+// static
+std::shared_ptr<VideoTrackTranscoder> VideoTrackTranscoder::create(
+        const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback) {
+    return std::shared_ptr<VideoTrackTranscoder>(new VideoTrackTranscoder(transcoderCallback));
+}
+
 VideoTrackTranscoder::~VideoTrackTranscoder() {
     if (mDecoder != nullptr) {
         AMediaCodec_delete(mDecoder);
@@ -159,17 +202,17 @@
         LOG(ERROR) << "Unable to create encoder for type " << destinationMime;
         return AMEDIA_ERROR_UNSUPPORTED;
     }
-    mEncoder = std::shared_ptr<AMediaCodec>(encoder,
-                                            std::bind(AMediaCodec_delete, std::placeholders::_1));
+    mEncoder = std::make_shared<CodecWrapper>(encoder, shared_from_this());
 
-    status = AMediaCodec_configure(mEncoder.get(), mDestinationFormat.get(), NULL /* surface */,
-                                   NULL /* crypto */, AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
+    status = AMediaCodec_configure(mEncoder->getCodec(), mDestinationFormat.get(),
+                                   NULL /* surface */, NULL /* crypto */,
+                                   AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
     if (status != AMEDIA_OK) {
         LOG(ERROR) << "Unable to configure video encoder: " << status;
         return status;
     }
 
-    status = AMediaCodec_createInputSurface(mEncoder.get(), &mSurface);
+    status = AMediaCodec_createInputSurface(mEncoder->getCodec(), &mSurface);
     if (status != AMEDIA_OK) {
         LOG(ERROR) << "Unable to create an encoder input surface: %d" << status;
         return status;
@@ -203,13 +246,17 @@
             .onAsyncFormatChanged = AsyncCodecCallbackDispatch::onAsyncFormatChanged,
             .onAsyncError = AsyncCodecCallbackDispatch::onAsyncError};
 
-    status = AMediaCodec_setAsyncNotifyCallback(mDecoder, asyncCodecCallbacks, this);
+    // Note: The decoder does not need its own wrapper because its lifetime is tied to the
+    // transcoder. However, the same callbacks are reused for the decoder and the encoder, so we
+    // pass the encoder wrapper as userdata here; the callbacks never read the codec from it.
+    status = AMediaCodec_setAsyncNotifyCallback(mDecoder, asyncCodecCallbacks, mEncoder.get());
     if (status != AMEDIA_OK) {
         LOG(ERROR) << "Unable to set decoder to async mode: " << status;
         return status;
     }
 
-    status = AMediaCodec_setAsyncNotifyCallback(mEncoder.get(), asyncCodecCallbacks, this);
+    status = AMediaCodec_setAsyncNotifyCallback(mEncoder->getCodec(), asyncCodecCallbacks,
+                                                mEncoder.get());
     if (status != AMEDIA_OK) {
         LOG(ERROR) << "Unable to set encoder to async mode: " << status;
         return status;
@@ -277,7 +324,7 @@
 
     if (bufferInfo.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
         LOG(DEBUG) << "EOS from decoder.";
-        media_status_t status = AMediaCodec_signalEndOfInputStream(mEncoder.get());
+        media_status_t status = AMediaCodec_signalEndOfInputStream(mEncoder->getCodec());
         if (status != AMEDIA_OK) {
             LOG(ERROR) << "SignalEOS on encoder returned error: " << status;
             mStatus = status;
@@ -289,12 +336,14 @@
                                                AMediaCodecBufferInfo bufferInfo) {
     if (bufferIndex >= 0) {
         size_t sampleSize = 0;
-        uint8_t* buffer = AMediaCodec_getOutputBuffer(mEncoder.get(), bufferIndex, &sampleSize);
+        uint8_t* buffer =
+                AMediaCodec_getOutputBuffer(mEncoder->getCodec(), bufferIndex, &sampleSize);
 
-        MediaSample::OnSampleReleasedCallback bufferReleaseCallback = [encoder = mEncoder](
-                                                                              MediaSample* sample) {
-            AMediaCodec_releaseOutputBuffer(encoder.get(), sample->bufferId, false /* render */);
-        };
+        MediaSample::OnSampleReleasedCallback bufferReleaseCallback =
+                [encoder = mEncoder](MediaSample* sample) {
+                    AMediaCodec_releaseOutputBuffer(encoder->getCodec(), sample->bufferId,
+                                                    false /* render */);
+                };
 
         std::shared_ptr<MediaSample> sample = MediaSample::createWithReleaseCallback(
                 buffer, bufferInfo.offset, bufferIndex, bufferReleaseCallback);
@@ -309,7 +358,7 @@
             return;
         }
     } else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
-        AMediaFormat* newFormat = AMediaCodec_getOutputFormat(mEncoder.get());
+        AMediaFormat* newFormat = AMediaCodec_getOutputFormat(mEncoder->getCodec());
         LOG(DEBUG) << "Encoder output format changed: " << AMediaFormat_toString(newFormat);
     }
 
@@ -400,11 +449,12 @@
     });
 
     mCodecMessageQueue.push([this] {
-        media_status_t status = AMediaCodec_start(mEncoder.get());
+        media_status_t status = AMediaCodec_start(mEncoder->getCodec());
         if (status != AMEDIA_OK) {
             LOG(ERROR) << "Unable to start video encoder: " << status;
             mStatus = status;
         }
+        mEncoder->setStarted();
     });
 
     // Process codec events until EOS is reached, transcoding is stopped or an error occurs.
@@ -419,8 +469,6 @@
     }
 
     AMediaCodec_stop(mDecoder);
-    // TODO: Stop invalidates all buffers. Stop encoder when last buffer is released.
-    //    AMediaCodec_stop(mEncoder.get());
     return mStatus;
 }
 
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h b/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
index 33bd9d4..031d01e 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
@@ -142,7 +142,7 @@
     std::shared_ptr<MediaSampleReader> mSampleReader;
     std::unique_ptr<MediaSampleWriter> mSampleWriter;
     std::vector<std::shared_ptr<AMediaFormat>> mSourceTrackFormats;
-    std::vector<std::unique_ptr<MediaTrackTranscoder>> mTrackTranscoders;
+    std::vector<std::shared_ptr<MediaTrackTranscoder>> mTrackTranscoders;
     std::mutex mTracksAddedMutex;
     std::unordered_set<const MediaTrackTranscoder*> mTracksAdded GUARDED_BY(mTracksAddedMutex);
 
diff --git a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
index 1ba205b..0a7bf33 100644
--- a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
@@ -34,10 +34,12 @@
  * using a native surface (ANativeWindow). Codec callback events are placed on a message queue and
  * serviced in order on the transcoding thread managed by MediaTrackTranscoder.
  */
-class VideoTrackTranscoder : public MediaTrackTranscoder {
+class VideoTrackTranscoder : public std::enable_shared_from_this<VideoTrackTranscoder>,
+                             public MediaTrackTranscoder {
 public:
-    VideoTrackTranscoder(const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback)
-          : MediaTrackTranscoder(transcoderCallback){};
+    static std::shared_ptr<VideoTrackTranscoder> create(
+            const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback);
+
     virtual ~VideoTrackTranscoder() override;
 
 private:
@@ -55,6 +57,10 @@
         std::condition_variable mCondition;
         std::deque<T> mQueue;
     };
+    class CodecWrapper;
+
+    VideoTrackTranscoder(const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback)
+          : MediaTrackTranscoder(transcoderCallback){};
 
     // MediaTrackTranscoder
     media_status_t runTranscodeLoop() override;
@@ -77,8 +83,7 @@
     void updateTrackFormat(AMediaFormat* outputFormat);
 
     AMediaCodec* mDecoder = nullptr;
-    // Sample release callback holds a reference to the encoder, hence the shared_ptr.
-    std::shared_ptr<AMediaCodec> mEncoder;
+    std::shared_ptr<CodecWrapper> mEncoder;
     ANativeWindow* mSurface = nullptr;
     bool mEosFromSource = false;
     bool mEosFromEncoder = false;
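Background for the private constructor plus static create() added to this header: shared_from_this() has defined behavior only once the object is already owned by a std::shared_ptr, so construction is funneled through a factory that establishes that ownership before any callback can call shared_from_this(). A generic sketch with a hypothetical Worker class, not part of the patch:

#include <memory>

class Worker : public std::enable_shared_from_this<Worker> {
public:
    static std::shared_ptr<Worker> create() {
        return std::shared_ptr<Worker>(new Worker());  // make_shared cannot reach the
    }                                                  // private constructor directly

    std::shared_ptr<Worker> self() { return shared_from_this(); }  // safe: always owned

private:
    Worker() = default;
};

// Worker w;                   // does not compile: constructor is private
// auto w = Worker::create();  // the only way to construct one, always via shared_ptr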
diff --git a/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
index 71d3a4e..502d5aa 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
@@ -53,7 +53,7 @@
 
         switch (GetParam()) {
         case VIDEO:
-            mTranscoder = std::make_shared<VideoTrackTranscoder>(mCallback);
+            mTranscoder = VideoTrackTranscoder::create(mCallback);
             break;
         case PASSTHROUGH:
             mTranscoder = std::make_shared<PassthroughTrackTranscoder>(mCallback);
@@ -164,8 +164,8 @@
     ASSERT_TRUE(mTranscoder->start());
     drainOutputSampleQueue();
     EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
-    EXPECT_TRUE(mTranscoder->stop());
     joinDrainThread();
+    EXPECT_TRUE(mTranscoder->stop());
     EXPECT_FALSE(mQueueWasAborted);
     EXPECT_TRUE(mGotEndOfStream);
 }
@@ -232,9 +232,9 @@
     ASSERT_TRUE(mTranscoder->start());
     drainOutputSampleQueue();
     EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+    joinDrainThread();
     EXPECT_TRUE(mTranscoder->stop());
     EXPECT_FALSE(mTranscoder->start());
-    joinDrainThread();
     EXPECT_FALSE(mQueueWasAborted);
     EXPECT_TRUE(mGotEndOfStream);
 }
@@ -247,9 +247,8 @@
     mTranscoderOutputQueue->abort();
     drainOutputSampleQueue();
     EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_ERROR_IO);
-    EXPECT_TRUE(mTranscoder->stop());
-
     joinDrainThread();
+    EXPECT_TRUE(mTranscoder->stop());
     EXPECT_TRUE(mQueueWasAborted);
     EXPECT_FALSE(mGotEndOfStream);
 }
@@ -265,8 +264,8 @@
 
     drainOutputSampleQueue();
     EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
-    EXPECT_TRUE(mTranscoder->stop());
     joinDrainThread();
+    EXPECT_TRUE(mTranscoder->stop());
     EXPECT_FALSE(mQueueWasAborted);
     EXPECT_TRUE(mGotEndOfStream);
 
diff --git a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
index 1eb9e5a..5f2cd12 100644
--- a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
@@ -95,12 +95,13 @@
 TEST_F(VideoTrackTranscoderTests, SampleSanity) {
     LOG(DEBUG) << "Testing SampleSanity";
     std::shared_ptr<TestCallback> callback = std::make_shared<TestCallback>();
-    VideoTrackTranscoder transcoder{callback};
+    auto transcoder = VideoTrackTranscoder::create(callback);
 
-    EXPECT_EQ(transcoder.configure(mMediaSampleReader, mTrackIndex, mDestinationFormat), AMEDIA_OK);
-    ASSERT_TRUE(transcoder.start());
+    EXPECT_EQ(transcoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+              AMEDIA_OK);
+    ASSERT_TRUE(transcoder->start());
 
-    std::shared_ptr<MediaSampleQueue> outputQueue = transcoder.getOutputQueue();
+    std::shared_ptr<MediaSampleQueue> outputQueue = transcoder->getOutputQueue();
     std::thread sampleConsumerThread{[&outputQueue] {
         uint64_t sampleCount = 0;
         std::shared_ptr<MediaSample> sample;
@@ -137,7 +138,7 @@
     }};
 
     EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
-    EXPECT_TRUE(transcoder.stop());
+    EXPECT_TRUE(transcoder->stop());
 
     sampleConsumerThread.join();
 }
@@ -148,11 +149,70 @@
     std::shared_ptr<TestCallback> callback = std::make_shared<TestCallback>();
     std::shared_ptr<AMediaFormat> nullFormat;
 
-    VideoTrackTranscoder transcoder{callback};
-    EXPECT_EQ(transcoder.configure(mMediaSampleReader, 0 /* trackIndex */, nullFormat),
+    auto transcoder = VideoTrackTranscoder::create(callback);
+    EXPECT_EQ(transcoder->configure(mMediaSampleReader, 0 /* trackIndex */, nullFormat),
               AMEDIA_ERROR_INVALID_PARAMETER);
 }
 
+TEST_F(VideoTrackTranscoderTests, LingeringEncoder) {
+    struct {
+        void wait() {
+            std::unique_lock<std::mutex> lock(mMutex);
+            while (!mSignaled) {
+                mCondition.wait(lock);
+            }
+        }
+
+        void signal() {
+            std::unique_lock<std::mutex> lock(mMutex);
+            mSignaled = true;
+            mCondition.notify_all();
+        }
+
+        std::mutex mMutex;
+        std::condition_variable mCondition;
+        bool mSignaled = false;
+    } semaphore;
+
+    auto callback = std::make_shared<TestCallback>();
+    auto transcoder = VideoTrackTranscoder::create(callback);
+
+    EXPECT_EQ(transcoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+              AMEDIA_OK);
+    ASSERT_TRUE(transcoder->start());
+
+    std::shared_ptr<MediaSampleQueue> outputQueue = transcoder->getOutputQueue();
+    std::vector<std::shared_ptr<MediaSample>> samples;
+    std::thread sampleConsumerThread([&outputQueue, &samples, &semaphore] {
+        std::shared_ptr<MediaSample> sample;
+        while (samples.size() < 10 && !outputQueue->dequeue(&sample)) {
+            ASSERT_NE(sample, nullptr);
+            samples.push_back(sample);
+
+            if (sample->info.flags & SAMPLE_FLAG_END_OF_STREAM) {
+                break;
+            }
+            sample.reset();
+        }
+
+        semaphore.signal();
+    });
+
+    // Wait for the encoder to output samples before stopping and releasing the transcoder.
+    semaphore.wait();
+
+    EXPECT_TRUE(transcoder->stop());
+    transcoder.reset();
+    sampleConsumerThread.join();
+
+    // Return buffers to the codec so that it can resume processing, but keep one buffer to
+    // prevent the codec from being released.
+    samples.resize(1);
+
+    // Wait for async codec events.
+    std::this_thread::sleep_for(std::chrono::seconds(1));
+}
+
 }  // namespace android
 
 int main(int argc, char** argv) {
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 6571162..dd2eed3 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -295,12 +295,12 @@
             <Feature name="bitrate-modes" value="VBR,CBR" />
         </MediaCodec>
         <MediaCodec name="c2.android.hevc.encoder" type="video/hevc" variant="!slow-cpu">
-            <!-- profiles and levels:  ProfileMain : MainTierLevel3 -->
-            <Limit name="size" min="2x2" max="960x544" />
+            <!-- profiles and levels:  ProfileMain : MainTierLevel51 -->
+            <Limit name="size" min="2x2" max="512x512" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="8x8" />
-            <Limit name="block-count" range="1-8160" /> <!-- max 960x544 -->
-            <Limit name="blocks-per-second" range="1-244880" />
+            <Limit name="block-count" range="1-4096" /> <!-- max 512x512 -->
+            <Limit name="blocks-per-second" range="1-122880" />
             <Limit name="frame-rate" range="1-120" />
             <Limit name="bitrate" range="1-10000000" />
             <Limit name="complexity" range="0-10"  default="0" />
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index eea5ef1..08cde5d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -1218,13 +1218,13 @@
             return;
         }
 
+        bufRet.streamId = streamId;
         if (outputStream->isAbandoned()) {
             bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
             allReqsSucceeds = false;
             continue;
         }
 
-        bufRet.streamId = streamId;
         size_t handOutBufferCount = outputStream->getOutstandingBuffersCount();
         uint32_t numBuffersRequested = bufReq.numBuffersRequested;
         size_t totalHandout = handOutBufferCount + numBuffersRequested;
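The Camera3OutputUtils change sets bufRet.streamId before the abandoned-stream early-out, so the error entry returned to the HAL still identifies which stream it belongs to. A minimal sketch of the ordering fix with illustrative types; the real return type is the camera HAL's buffer-request result, not this struct.

struct BufferRetSketch { int streamId = -1; /* plus a status/buffers union in the real type */ };

BufferRetSketch makeReturnEntry(int streamId, bool streamAbandoned) {
    BufferRetSketch ret;
    ret.streamId = streamId;   // set unconditionally, before any early exit
    if (streamAbandoned) {
        // ret.val.error(STREAM_DISCONNECTED) in the real code; streamId is already valid here
        return ret;
    }
    // ... normal buffer handout path ...
    return ret;
}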