Merge "AudioTrack: get/setStartThresholdInFrames" into sc-dev
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4e07c5c..889b8ab 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5384,7 +5384,7 @@
      * </ul></p>
      *
      * <p>Since optical image stabilization generally involves motion much faster than the duration
-     * of individualq image exposure, multiple OIS samples can be included for a single capture
+     * of individual image exposure, multiple OIS samples can be included for a single capture
      * result. For example, if the OIS reporting operates at 200 Hz, a typical camera operating
      * at 30fps may have 6-7 OIS samples per capture result. This information can be combined
      * with the rolling shutter skew to account for lens motion during image exposure in
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index e1cc6b3..3c87531 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -448,6 +448,20 @@
         work->worklets.front()->output.configUpdate.push_back(std::move(csd));
     }
 
+    // handle dynamic bitrate change
+    {
+        IntfImpl::Lock lock = mIntf->lock();
+        std::shared_ptr<C2StreamBitrateInfo::output> bitrate = mIntf->getBitrate_l();
+        lock.unlock();
+
+        if (bitrate != mBitrate) {
+            mBitrate = bitrate;
+            int layerBitrate[2] = {static_cast<int>(mBitrate->value), 0};
+            ALOGV("Calling PVUpdateBitRate %d", layerBitrate[0]);
+            PVUpdateBitRate(mHandle, layerBitrate);
+        }
+    }
+
     std::shared_ptr<const C2GraphicView> rView;
     std::shared_ptr<C2Buffer> inputBuffer;
     bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
diff --git a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
index 5ec88ec..7c2e014 100644
--- a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
@@ -201,6 +201,8 @@
         c2_status_t err = mAllocator->priorGraphicAllocation(handle, &alloc);
         mAllocatorMutex.unlock();
         if (err != OK) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
             return UNKNOWN_ERROR;
         }
         std::shared_ptr<C2GraphicBlock> block =
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 1a92c08..3a7af10 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -33,14 +33,12 @@
 #include <OMX_IndexExt.h>
 
 #include <android/fdsan.h>
-#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/omx/OMXUtils.h>
 #include <media/stagefright/MediaErrors.h>
 #include <ui/Fence.h>
 #include <ui/GraphicBuffer.h>
 #include <utils/Thread.h>
 
-#include "utils/Codec2Mapper.h"
 #include "C2OMXNode.h"
 
 namespace android {
@@ -73,25 +71,6 @@
         jobs->cond.broadcast();
     }
 
-    void setDataspace(android_dataspace dataspace) {
-        Mutexed<Jobs>::Locked jobs(mJobs);
-        ColorUtils::convertDataSpaceToV0(dataspace);
-        jobs->configUpdate.emplace_back(new C2StreamDataSpaceInfo::input(0u, dataspace));
-        int32_t standard = (int32_t(dataspace) & HAL_DATASPACE_STANDARD_MASK)
-            >> HAL_DATASPACE_STANDARD_SHIFT;
-        int32_t transfer = (int32_t(dataspace) & HAL_DATASPACE_TRANSFER_MASK)
-            >> HAL_DATASPACE_TRANSFER_SHIFT;
-        int32_t range = (int32_t(dataspace) & HAL_DATASPACE_RANGE_MASK)
-            >> HAL_DATASPACE_RANGE_SHIFT;
-        std::unique_ptr<C2StreamColorAspectsInfo::input> colorAspects =
-            std::make_unique<C2StreamColorAspectsInfo::input>(0u);
-        if (C2Mapper::map(standard, &colorAspects->primaries, &colorAspects->matrix)
-                && C2Mapper::map(transfer, &colorAspects->transfer)
-                && C2Mapper::map(range, &colorAspects->range)) {
-            jobs->configUpdate.push_back(std::move(colorAspects));
-        }
-    }
-
 protected:
     bool threadLoop() override {
         constexpr nsecs_t kIntervalNs = nsecs_t(10) * 1000 * 1000;  // 10ms
@@ -123,9 +102,6 @@
                     uniqueFds.push_back(std::move(queue.workList.front().fd1));
                     queue.workList.pop_front();
                 }
-                for (const std::unique_ptr<C2Param> &param : jobs->configUpdate) {
-                    items.front()->input.configUpdate.emplace_back(C2Param::Copy(*param));
-                }
 
                 jobs.unlock();
                 for (int fenceFd : fenceFds) {
@@ -143,7 +119,6 @@
                 queued = true;
             }
             if (queued) {
-                jobs->configUpdate.clear();
                 return true;
             }
             if (i == 0) {
@@ -186,7 +161,6 @@
         std::map<std::weak_ptr<Codec2Client::Component>,
                  Queue,
                  std::owner_less<std::weak_ptr<Codec2Client::Component>>> queues;
-        std::vector<std::unique_ptr<C2Param>> configUpdate;
         Condition cond;
     };
     Mutexed<Jobs> mJobs;
@@ -198,9 +172,6 @@
       mQueueThread(new QueueThread) {
     android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ALWAYS);
     mQueueThread->run("C2OMXNode", PRIORITY_AUDIO);
-
-    Mutexed<android_dataspace>::Locked ds(mDataspace);
-    *ds = HAL_DATASPACE_UNKNOWN;
 }
 
 status_t C2OMXNode::freeNode() {
@@ -421,6 +392,8 @@
         if (err != OK) {
             (void)fd0.release();
             (void)fd1.release();
+            native_handle_close(handle);
+            native_handle_delete(handle);
             return UNKNOWN_ERROR;
         }
         block = _C2BlockFactory::CreateGraphicBlock(alloc);
@@ -488,11 +461,8 @@
     android_dataspace dataSpace = (android_dataspace)msg.u.event_data.data1;
     uint32_t pixelFormat = msg.u.event_data.data3;
 
+    // TODO: set dataspace on component to see if it impacts color aspects
     ALOGD("dataspace changed to %#x pixel format: %#x", dataSpace, pixelFormat);
-    mQueueThread->setDataspace(dataSpace);
-
-    Mutexed<android_dataspace>::Locked ds(mDataspace);
-    *ds = dataSpace;
     return OK;
 }
 
@@ -525,8 +495,4 @@
     (void)mBufferSource->onInputBufferEmptied(bufferId, -1);
 }
 
-android_dataspace C2OMXNode::getDataspace() {
-    return *mDataspace.lock();
-}
-
 }  // namespace android
diff --git a/media/codec2/sfplugin/C2OMXNode.h b/media/codec2/sfplugin/C2OMXNode.h
index 5d587bc..1717c96 100644
--- a/media/codec2/sfplugin/C2OMXNode.h
+++ b/media/codec2/sfplugin/C2OMXNode.h
@@ -93,8 +93,6 @@
      */
     void onInputBufferDone(c2_cntr64_t index);
 
-    android_dataspace getDataspace();
-
 private:
     std::weak_ptr<Codec2Client::Component> mComp;
     sp<IOMXBufferSource> mBufferSource;
@@ -103,7 +101,6 @@
     uint32_t mWidth;
     uint32_t mHeight;
     uint64_t mUsage;
-    Mutexed<android_dataspace> mDataspace;
 
     // WORKAROUND: timestamp adjustment
 
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index fe451f6..af11592 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -211,6 +211,8 @@
                 (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
                 &usage, sizeof(usage));
 
+        // NOTE: we do not use/pass through color aspects from GraphicBufferSource as we
+        // communicate that directly to the component.
         mSource->configure(
                 mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace));
         return OK;
@@ -409,10 +411,6 @@
         mNode->onInputBufferDone(index);
     }
 
-    android_dataspace getDataspace() override {
-        return mNode->getDataspace();
-    }
-
 private:
     sp<HGraphicBufferSource> mSource;
     sp<C2OMXNode> mNode;
@@ -1031,9 +1029,6 @@
             }
         }
 
-        // get color aspects
-        getColorAspectsFromFormat(msg, config->mClientColorAspects);
-
         /*
          * Handle dataspace
          */
@@ -1043,12 +1038,12 @@
             int32_t width, height;
             if (msg->findInt32("width", &width)
                     && msg->findInt32("height", &height)) {
-                setDefaultCodecColorAspectsIfNeeded(config->mClientColorAspects, width, height);
+                ColorAspects aspects;
+                getColorAspectsFromFormat(msg, aspects);
+                setDefaultCodecColorAspectsIfNeeded(aspects, width, height);
                 // TODO: read dataspace / color aspect from the component
-                setColorAspectsIntoFormat(
-                        config->mClientColorAspects, const_cast<sp<AMessage> &>(msg));
-                dataSpace = getDataSpaceForColorAspects(
-                        config->mClientColorAspects, true /* mayexpand */);
+                setColorAspectsIntoFormat(aspects, const_cast<sp<AMessage> &>(msg));
+                dataSpace = getDataSpaceForColorAspects(aspects, true /* mayexpand */);
             }
             msg->setInt32("android._dataspace", (int32_t)dataSpace);
             ALOGD("setting dataspace to %x", dataSpace);
@@ -1987,44 +1982,6 @@
     }
 }
 
-static void HandleDataspace(
-        android_dataspace dataspace, ColorAspects *colorAspects, sp<AMessage> *format) {
-    ColorUtils::convertDataSpaceToV0(dataspace);
-    int32_t range, standard, transfer;
-    range = (dataspace & HAL_DATASPACE_RANGE_MASK) >> HAL_DATASPACE_RANGE_SHIFT;
-    if (range == 0) {
-        range = ColorUtils::wrapColorAspectsIntoColorRange(
-                colorAspects->mRange);
-    }
-    standard = (dataspace & HAL_DATASPACE_STANDARD_MASK) >> HAL_DATASPACE_STANDARD_SHIFT;
-    if (standard == 0) {
-        standard = ColorUtils::wrapColorAspectsIntoColorStandard(
-                colorAspects->mPrimaries,
-                colorAspects->mMatrixCoeffs);
-    }
-    transfer = (dataspace & HAL_DATASPACE_TRANSFER_MASK) >> HAL_DATASPACE_TRANSFER_SHIFT;
-    if (transfer == 0) {
-        transfer = ColorUtils::wrapColorAspectsIntoColorTransfer(
-                colorAspects->mTransfer);
-    }
-    ColorAspects newColorAspects;
-    ColorUtils::convertPlatformColorAspectsToCodecAspects(
-            range, standard, transfer, newColorAspects);
-    if (ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
-            newColorAspects, *colorAspects)) {
-        *format = (*format)->dup();
-        (*format)->setInt32(KEY_COLOR_RANGE, range);
-        (*format)->setInt32(KEY_COLOR_STANDARD, standard);
-        (*format)->setInt32(KEY_COLOR_TRANSFER, transfer);
-        // Record current color aspects into |colorAspects|.
-        // NOTE: newColorAspects could have been modified by
-        //       checkIfAspectsChangedAndUnspecifyThem() above,
-        //       so *colorAspects = newColorAspects does not work as intended.
-        ColorUtils::convertPlatformColorAspectsToCodecAspects(
-                range, standard, transfer, *colorAspects);
-    }
-}
-
 void CCodec::onMessageReceived(const sp<AMessage> &msg) {
     TimePoint now = std::chrono::steady_clock::now();
     CCodecWatchdog::getInstance()->watch(this);
@@ -2139,10 +2096,6 @@
 
                 sp<AMessage> outputFormat = config->mOutputFormat;
                 config->updateConfiguration(updates, config->mOutputDomain);
-                if (config->mInputSurface) {
-                    android_dataspace ds = config->mInputSurface->getDataspace();
-                    HandleDataspace(ds, &config->mClientColorAspects, &config->mOutputFormat);
-                }
                 RevertOutputFormatIfNeeded(outputFormat, config->mOutputFormat);
 
                 // copy standard infos to graphic buffers if not already present (otherwise, we
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 2025da2..e7207a5 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -77,34 +77,39 @@
 void CCodecBuffers::handleImageData(const sp<Codec2Buffer> &buffer) {
     sp<ABuffer> imageDataCandidate = buffer->getImageData();
     if (imageDataCandidate == nullptr) {
+        if (mFormatWithImageData) {
+            // We previously sent the format with image data, so use the same format.
+            buffer->setFormat(mFormatWithImageData);
+        }
         return;
     }
-    sp<ABuffer> imageData;
-    if (!mFormat->findBuffer("image-data", &imageData)
-            || imageDataCandidate->size() != imageData->size()
-            || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
+    if (!mLastImageData
+            || imageDataCandidate->size() != mLastImageData->size()
+            || memcmp(imageDataCandidate->data(),
+                      mLastImageData->data(),
+                      mLastImageData->size()) != 0) {
         ALOGD("[%s] updating image-data", mName);
-        sp<AMessage> newFormat = dupFormat();
-        newFormat->setBuffer("image-data", imageDataCandidate);
+        mFormatWithImageData = dupFormat();
+        mLastImageData = imageDataCandidate;
+        mFormatWithImageData->setBuffer("image-data", imageDataCandidate);
         MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
         if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
             int32_t stride = img->mPlane[0].mRowInc;
-            newFormat->setInt32(KEY_STRIDE, stride);
+            mFormatWithImageData->setInt32(KEY_STRIDE, stride);
             ALOGD("[%s] updating stride = %d", mName, stride);
             if (img->mNumPlanes > 1 && stride > 0) {
                 int64_t offsetDelta =
                     (int64_t)img->mPlane[1].mOffset - (int64_t)img->mPlane[0].mOffset;
                 int32_t vstride = int32_t(offsetDelta / stride);
-                newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
+                mFormatWithImageData->setInt32(KEY_SLICE_HEIGHT, vstride);
                 ALOGD("[%s] updating vstride = %d", mName, vstride);
                 buffer->setRange(
                         img->mPlane[0].mOffset,
                         buffer->size() - img->mPlane[0].mOffset);
             }
         }
-        setFormat(newFormat);
-        buffer->setFormat(newFormat);
     }
+    buffer->setFormat(mFormatWithImageData);
 }
 
 // InputBuffers
@@ -273,22 +278,12 @@
 
     if (entry.notify && mFormat != outputFormat) {
         updateSkipCutBuffer(outputFormat);
-        sp<ABuffer> imageData;
-        if (mFormat->findBuffer("image-data", &imageData)) {
-            outputFormat->setBuffer("image-data", imageData);
-        }
-        int32_t stride;
-        if (mFormat->findInt32(KEY_STRIDE, &stride)) {
-            outputFormat->setInt32(KEY_STRIDE, stride);
-        }
-        int32_t sliceHeight;
-        if (mFormat->findInt32(KEY_SLICE_HEIGHT, &sliceHeight)) {
-            outputFormat->setInt32(KEY_SLICE_HEIGHT, sliceHeight);
-        }
+        // Clear cached image data so it is re-applied on top of the new format
+        mLastImageData.clear();
         ALOGV("[%s] popFromStashAndRegister: output format reference changed: %p -> %p",
                 mName, mFormat.get(), outputFormat.get());
-        ALOGD("[%s] popFromStashAndRegister: output format changed to %s",
-                mName, outputFormat->debugString().c_str());
+        ALOGD("[%s] popFromStashAndRegister: at %lldus, output format changed to %s",
+                mName, (long long)entry.timestamp, outputFormat->debugString().c_str());
         setFormat(outputFormat);
     }
 
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 7c4e7b1..995d3a4 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -86,6 +86,9 @@
     // Format to be used for creating MediaCodec-facing buffers.
     sp<AMessage> mFormat;
 
+    sp<ABuffer> mLastImageData;
+    sp<AMessage> mFormatWithImageData;
+
 private:
     DISALLOW_EVIL_CONSTRUCTORS(CCodecBuffers);
 };
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
index d9116f7..7e060f6 100644
--- a/media/codec2/sfplugin/CCodecConfig.h
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -27,7 +27,6 @@
 #include <C2Debug.h>
 
 #include <codec2/hidl/client.h>
-#include <media/stagefright/foundation/ColorUtils.h>
 #include <utils/RefBase.h>
 
 #include "InputSurfaceWrapper.h"
@@ -125,7 +124,6 @@
 
     std::shared_ptr<InputSurfaceWrapper> mInputSurface;
     std::unique_ptr<InputSurfaceWrapper::Config> mISConfig;
-    ColorAspects mClientColorAspects;
 
     /// the current configuration. Updated after configure() and based on configUpdate in
     /// onWorkDone
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index fc4ee51..34e6a88 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -713,6 +713,8 @@
     c2_status_t err = mAlloc->priorGraphicAllocation(handle, &alloc);
     if (err != C2_OK) {
         ALOGD("Failed to wrap VideoNativeMetadata into C2GraphicAllocation");
+        native_handle_close(handle);
+        native_handle_delete(handle);
         return nullptr;
     }
     std::shared_ptr<C2GraphicBlock> block = _C2BlockFactory::CreateGraphicBlock(alloc);
diff --git a/media/codec2/sfplugin/InputSurfaceWrapper.h b/media/codec2/sfplugin/InputSurfaceWrapper.h
index d29738c..479acb1 100644
--- a/media/codec2/sfplugin/InputSurfaceWrapper.h
+++ b/media/codec2/sfplugin/InputSurfaceWrapper.h
@@ -106,8 +106,6 @@
      */
     virtual void onInputBufferDone(c2_cntr64_t /* index */) {}
 
-    virtual android_dataspace getDataspace() { return mDataSpace; }
-
 protected:
     android_dataspace mDataSpace;
 };
diff --git a/media/codec2/vndk/include/C2AllocatorGralloc.h b/media/codec2/vndk/include/C2AllocatorGralloc.h
index 578cf76..1da3e14 100644
--- a/media/codec2/vndk/include/C2AllocatorGralloc.h
+++ b/media/codec2/vndk/include/C2AllocatorGralloc.h
@@ -37,7 +37,8 @@
  * Wrap the gralloc handle and metadata into Codec2 handle recognized by
  * C2AllocatorGralloc.
  *
- * @return a new NON-OWNING C2Handle that must be deleted using native_handle_delete.
+ * @return a new NON-OWNING C2Handle that must be closed and deleted using native_handle_close and
+ * native_handle_delete.
  */
 C2Handle *WrapNativeCodec2GrallocHandle(
         const native_handle_t *const handle,
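For illustration, a minimal sketch of the ownership contract documented above, modeled on the call sites patched elsewhere in this change (InputSurfaceConnection.cpp, Codec2Buffer.cpp, C2BqBuffer.cpp); the allocator type and the surrounding function are assumptions for the sketch, not part of the patch:

    // Sketch only: the C2Handle returned by WrapNativeCodec2GrallocHandle() is
    // not owned by the allocator, so the caller must close and delete it on
    // failure, exactly as the patched call sites in this change now do.
    c2_status_t wrapIntoAllocation(const std::shared_ptr<C2Allocator> &allocator,
                                   C2Handle *handle,  // from WrapNativeCodec2GrallocHandle()
                                   std::shared_ptr<C2GraphicAllocation> *alloc) {
        c2_status_t err = allocator->priorGraphicAllocation(handle, alloc);
        if (err != C2_OK) {
            native_handle_close(handle);   // close the duplicated fds
            native_handle_delete(handle);  // free the native_handle_t itself
            return err;
        }
        return C2_OK;
    }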
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index e14b4b1..3f6fa7d 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -364,6 +364,8 @@
                 std::shared_ptr<C2GraphicAllocation> alloc;
                 c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
                 if (err != C2_OK) {
+                    native_handle_close(c2Handle);
+                    native_handle_delete(c2Handle);
                     return err;
                 }
                 std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
diff --git a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
index 4ea3c69..234faef 100644
--- a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
+++ b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
@@ -527,11 +527,9 @@
     }
 
     /* check bit rate */
-    /* set max bit rate */
     for (i = 0; i < encParams->nLayers; i++)
     {
         encParams->LayerBitRate[i] = encOption->bitRate[i];
-        encParams->LayerMaxBitRate[i] = encOption->bitRate[i];
     }
     if (encParams->nLayers > 1)
     {
@@ -3305,6 +3303,3 @@
 }
 
 #endif /* #ifndef ORIGINAL_VERSION */
-
-
-
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 5d49759..d103aca 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -91,16 +91,24 @@
                                                    const char* packageName)
 {
     AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
-    // Only system apps can read the op package name. For regular apps the regular package name
-    // is a sufficient replacement
-    streamBuilder->setOpPackageName(packageName);
+    std::optional<std::string> optionalPackageName;
+    if (packageName != nullptr) {
+      optionalPackageName = std::string(packageName);
+    }
+    // Only system apps can read the op package name. For regular apps the
+    // regular package name is a sufficient replacement
+    streamBuilder->setOpPackageName(optionalPackageName);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setAttributionTag(AAudioStreamBuilder* builder,
                                                       const char* attributionTag)
 {
     AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
-    streamBuilder->setAttributionTag(attributionTag);
+    std::optional<std::string> optionalAttrTag;
+    if (attributionTag != nullptr) {
+      optionalAttrTag = std::string(attributionTag);
+    }
+    streamBuilder->setAttributionTag(optionalAttrTag);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index bb39d8b..5737052 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -141,7 +141,7 @@
     }
 
     // TODO b/182392769: reexamine if Identity can be used
-    void setOpPackageName(const std::string opPackageName) {
+    void setOpPackageName(const std::optional<std::string> opPackageName) {
         mOpPackageName = opPackageName;
     }
 
@@ -149,7 +149,7 @@
         return mAttributionTag;
     }
 
-    void setAttributionTag(const std::string attributionTag) {
+    void setAttributionTag(const std::optional<std::string> attributionTag) {
         mAttributionTag = attributionTag;
     }
 
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 87ed8b6..c6e036a 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -18,15 +18,10 @@
     ],
 }
 
-cc_library {
-    name: "libaudiopreprocessing",
+cc_defaults {
+    name: "libaudiopreprocessing-defaults",
     vendor: true,
-    relative_install_path: "soundfx",
     host_supported: true,
-    srcs: ["PreProcessing.cpp"],
-    local_include_dirs: [
-        ".",
-    ],
     cflags: [
         "-Wall",
         "-Werror",
@@ -46,7 +41,6 @@
     header_libs: [
         "libaudioeffects",
         "libhardware_headers",
-        "libwebrtc_absl_headers",
     ],
     target: {
         darwin: {
@@ -54,3 +48,13 @@
         },
     },
 }
+
+cc_library {
+    name: "libaudiopreprocessing",
+    defaults: ["libaudiopreprocessing-defaults"],
+    relative_install_path: "soundfx",
+    srcs: ["PreProcessing.cpp"],
+    header_libs: [
+        "libwebrtc_absl_headers",
+    ],
+}
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 3b0b6d6..19a8b2f 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -105,9 +105,8 @@
     webrtc::AudioProcessing* apm;  // handle on webRTC audio processing module (APM)
     // Audio Processing module builder
     webrtc::AudioProcessingBuilder ap_builder;
-    size_t apmFrameCount;      // buffer size for webRTC process (10 ms)
-    uint32_t apmSamplingRate;  // webRTC APM sampling rate (8/16 or 32 kHz)
-    size_t frameCount;         // buffer size before input resampler ( <=> apmFrameCount)
+    // frameCount represents the size of the buffers used for processing, and must represent 10ms.
+    size_t frameCount;
     uint32_t samplingRate;     // sampling rate at effect process interface
     uint32_t inChannelCount;   // input channel count
     uint32_t outChannelCount;  // output channel count
@@ -119,21 +118,12 @@
     webrtc::AudioProcessing::Config config;
     webrtc::StreamConfig inputConfig;   // input stream configuration
     webrtc::StreamConfig outputConfig;  // output stream configuration
-    int16_t* inBuf;    // input buffer used when resampling
-    size_t inBufSize;  // input buffer size in frames
-    size_t framesIn;   // number of frames in input buffer
-    int16_t* outBuf;    // output buffer used when resampling
-    size_t outBufSize;  // output buffer size in frames
-    size_t framesOut;   // number of frames in output buffer
     uint32_t revChannelCount;  // number of channels on reverse stream
     uint32_t revEnabledMsk;    // bit field containing IDs of enabled pre processors
                                // with reverse channel
     uint32_t revProcessedMsk;  // bit field containing IDs of pre processors with reverse
                                // channel already processed in current round
     webrtc::StreamConfig revConfig;     // reverse stream configuration.
-    int16_t* revBuf;    // reverse channel input buffer
-    size_t revBufSize;  // reverse channel input buffer size
-    size_t framesRev;   // number of frames in reverse channel input buffer
 };
 
 #ifdef DUAL_MIC_TEST
@@ -862,9 +852,7 @@
             ALOGW("Session_CreateEffect could not get apm engine");
             goto error;
         }
-        session->apmSamplingRate = kPreprocDefaultSr;
-        session->apmFrameCount = (kPreprocDefaultSr) / 100;
-        session->frameCount = session->apmFrameCount;
+        session->frameCount = kPreprocDefaultSr / 100;
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
@@ -879,12 +867,6 @@
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
         session->revProcessedMsk = 0;
-        session->inBuf = NULL;
-        session->inBufSize = 0;
-        session->outBuf = NULL;
-        session->outBufSize = 0;
-        session->revBuf = NULL;
-        session->revBufSize = 0;
     }
     status = Effect_Create(&session->effects[procId], session, interface);
     if (status < 0) {
@@ -908,13 +890,6 @@
     if (session->createdMsk == 0) {
         delete session->apm;
         session->apm = NULL;
-        delete session->inBuf;
-        session->inBuf = NULL;
-        free(session->outBuf);
-        session->outBuf = NULL;
-        delete session->revBuf;
-        session->revBuf = NULL;
-
         session->id = 0;
     }
 
@@ -934,24 +909,8 @@
     ALOGV("Session_SetConfig sr %d cnl %08x", config->inputCfg.samplingRate,
           config->inputCfg.channels);
 
-    // AEC implementation is limited to 16kHz
-    if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
-        session->apmSamplingRate = 32000;
-    } else if (config->inputCfg.samplingRate >= 16000) {
-        session->apmSamplingRate = 16000;
-    } else if (config->inputCfg.samplingRate >= 8000) {
-        session->apmSamplingRate = 8000;
-    }
-
-
     session->samplingRate = config->inputCfg.samplingRate;
-    session->apmFrameCount = session->apmSamplingRate / 100;
-    if (session->samplingRate == session->apmSamplingRate) {
-        session->frameCount = session->apmFrameCount;
-    } else {
-        session->frameCount =
-                (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate;
-    }
+    session->frameCount = session->samplingRate / 100;
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
     session->inputConfig.set_sample_rate_hz(session->samplingRate);
@@ -963,13 +922,6 @@
     session->revConfig.set_sample_rate_hz(session->samplingRate);
     session->revConfig.set_num_channels(inCnl);
 
-    // force process buffer reallocation
-    session->inBufSize = 0;
-    session->outBufSize = 0;
-    session->framesIn = 0;
-    session->framesOut = 0;
-
-
     session->state = PREPROC_SESSION_STATE_CONFIG;
     return 0;
 }
@@ -1004,9 +956,6 @@
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
     session->revChannelCount = inCnl;
-    // force process buffer reallocation
-    session->revBufSize = 0;
-    session->framesRev = 0;
 
     return 0;
 }
@@ -1023,12 +972,8 @@
 
 void Session_SetProcEnabled(preproc_session_t* session, uint32_t procId, bool enabled) {
     if (enabled) {
-        if (session->enabledMsk == 0) {
-            session->framesIn = 0;
-        }
         session->enabledMsk |= (1 << procId);
         if (HasReverseStream(procId)) {
-            session->framesRev = 0;
             session->revEnabledMsk |= (1 << procId);
         }
     } else {
@@ -1117,43 +1062,24 @@
         return -EINVAL;
     }
 
+    if (inBuffer->frameCount != outBuffer->frameCount) {
+        ALOGW("inBuffer->frameCount %zu is not equal to outBuffer->frameCount %zu",
+              inBuffer->frameCount, outBuffer->frameCount);
+        return -EINVAL;
+    }
+
+    if (inBuffer->frameCount != session->frameCount) {
+        ALOGW("inBuffer->frameCount %zu != %zu representing 10ms at sampling rate %d",
+              inBuffer->frameCount, session->frameCount, session->samplingRate);
+        return -EINVAL;
+    }
+
     session->processedMsk |= (1 << effect->procId);
 
     //    ALOGV("PreProcessingFx_Process In %d frames enabledMsk %08x processedMsk %08x",
     //         inBuffer->frameCount, session->enabledMsk, session->processedMsk);
-
     if ((session->processedMsk & session->enabledMsk) == session->enabledMsk) {
         effect->session->processedMsk = 0;
-        size_t framesRq = outBuffer->frameCount;
-        size_t framesWr = 0;
-        if (session->framesOut) {
-            size_t fr = session->framesOut;
-            if (outBuffer->frameCount < fr) {
-                fr = outBuffer->frameCount;
-            }
-            memcpy(outBuffer->s16, session->outBuf,
-                   fr * session->outChannelCount * sizeof(int16_t));
-            memmove(session->outBuf, session->outBuf + fr * session->outChannelCount,
-                    (session->framesOut - fr) * session->outChannelCount * sizeof(int16_t));
-            session->framesOut -= fr;
-            framesWr += fr;
-        }
-        outBuffer->frameCount = framesWr;
-        if (framesWr == framesRq) {
-            inBuffer->frameCount = 0;
-            return 0;
-        }
-
-        size_t fr = session->frameCount - session->framesIn;
-        if (inBuffer->frameCount < fr) {
-            fr = inBuffer->frameCount;
-        }
-        session->framesIn += fr;
-        inBuffer->frameCount = fr;
-        if (session->framesIn < session->frameCount) {
-            return 0;
-        }
-        session->framesIn = 0;
         if (int status = effect->session->apm->ProcessStream(
                     (const int16_t* const)inBuffer->s16,
                     (const webrtc::StreamConfig)effect->session->inputConfig,
@@ -1163,34 +1089,6 @@
             ALOGE("Process Stream failed with error %d\n", status);
             return status;
         }
-        outBuffer->frameCount = inBuffer->frameCount;
-
-        if (session->outBufSize < session->framesOut + session->frameCount) {
-            int16_t* buf;
-            session->outBufSize = session->framesOut + session->frameCount;
-            buf = (int16_t*)realloc(
-                    session->outBuf,
-                    session->outBufSize * session->outChannelCount * sizeof(int16_t));
-            if (buf == NULL) {
-                session->framesOut = 0;
-                free(session->outBuf);
-                session->outBuf = NULL;
-                return -ENOMEM;
-            }
-            session->outBuf = buf;
-        }
-
-        fr = session->framesOut;
-        if (framesRq - framesWr < fr) {
-            fr = framesRq - framesWr;
-        }
-        memcpy(outBuffer->s16 + framesWr * session->outChannelCount, session->outBuf,
-               fr * session->outChannelCount * sizeof(int16_t));
-        memmove(session->outBuf, session->outBuf + fr * session->outChannelCount,
-                (session->framesOut - fr) * session->outChannelCount * sizeof(int16_t));
-        session->framesOut -= fr;
-        outBuffer->frameCount += fr;
-
         return 0;
     } else {
         return -ENODATA;
@@ -1565,6 +1463,18 @@
         return -EINVAL;
     }
 
+    if (inBuffer->frameCount != outBuffer->frameCount) {
+        ALOGW("inBuffer->frameCount %zu is not equal to outBuffer->frameCount %zu",
+              inBuffer->frameCount, outBuffer->frameCount);
+        return -EINVAL;
+    }
+
+    if (inBuffer->frameCount != session->frameCount) {
+        ALOGW("inBuffer->frameCount %zu != %zu representing 10ms at sampling rate %d",
+              inBuffer->frameCount, session->frameCount, session->samplingRate);
+        return -EINVAL;
+    }
+
     session->revProcessedMsk |= (1 << effect->procId);
 
     //    ALOGV("PreProcessingFx_ProcessReverse In %d frames revEnabledMsk %08x revProcessedMsk
@@ -1573,16 +1483,6 @@
 
     if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
         effect->session->revProcessedMsk = 0;
-        size_t fr = session->frameCount - session->framesRev;
-        if (inBuffer->frameCount < fr) {
-            fr = inBuffer->frameCount;
-        }
-        session->framesRev += fr;
-        inBuffer->frameCount = fr;
-        if (session->framesRev < session->frameCount) {
-            return 0;
-        }
-        session->framesRev = 0;
         if (int status = effect->session->apm->ProcessReverseStream(
                     (const int16_t* const)inBuffer->s16,
                     (const webrtc::StreamConfig)effect->session->revConfig,
diff --git a/media/libeffects/preprocessing/README.md b/media/libeffects/preprocessing/README.md
new file mode 100644
index 0000000..af46376
--- /dev/null
+++ b/media/libeffects/preprocessing/README.md
@@ -0,0 +1,7 @@
+# Preprocessing effects
+
+## Limitations
+- Preprocessing effects currently work on 10ms worth of data and do not support
+  arbitrary frame counts. This limitation comes from the underlying effects in
+  the webrtc modules.
+- There is currently no API to communicate this requirement.
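As a concrete illustration of the sizing rule above (a sketch, not part of the patch; it mirrors session->frameCount = samplingRate / 100 in Session_SetConfig() and the new frameCount checks in PreProcessing.cpp):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch only: the effects accept exactly 10 ms of audio per process() call,
    // i.e. samplingRate / 100 frames; any other inBuffer/outBuffer frameCount now
    // fails with -EINVAL.
    static std::vector<int16_t> makeTenMsBuffer(uint32_t samplingRate, uint32_t channelCount) {
        const size_t frameCount = samplingRate / 100;  // e.g. 480 frames at 48000 Hz
        return std::vector<int16_t>(frameCount * channelCount);
    }

Both audio_buffer_t::frameCount values passed to process() must equal this frameCount; EffectTestHelper.cpp below shows the full create/configure/process sequence.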
diff --git a/media/libeffects/preprocessing/benchmarks/Android.bp b/media/libeffects/preprocessing/benchmarks/Android.bp
index c1b2295..fbbcab4 100644
--- a/media/libeffects/preprocessing/benchmarks/Android.bp
+++ b/media/libeffects/preprocessing/benchmarks/Android.bp
@@ -11,27 +11,10 @@
 
 cc_benchmark {
     name: "preprocessing_benchmark",
-    vendor: true,
+    defaults: ["libaudiopreprocessing-defaults"],
     srcs: ["preprocessing_benchmark.cpp"],
-    shared_libs: [
-        "libaudioutils",
-        "liblog",
-        "libutils",
-    ],
     static_libs: [
         "libaudiopreprocessing",
-        "webrtc_audio_processing",
-    ],
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-        "libwebrtc_absl_headers",
+        "libaudioutils",
     ],
 }
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index 6413945..d80b135 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -11,29 +11,29 @@
 }
 
 cc_test {
-    name: "AudioPreProcessingTest",
-    vendor: true,
-    host_supported: true,
-    gtest: false,
-    srcs: ["PreProcessingTest.cpp"],
-    shared_libs: [
-        "libaudioutils",
-        "liblog",
-        "libutils",
+    name: "EffectPreprocessingTest",
+    defaults: ["libaudiopreprocessing-defaults"],
+    gtest: true,
+    test_suites: ["device-tests"],
+    srcs: [
+        "EffectPreprocessingTest.cpp",
+        "EffectTestHelper.cpp",
     ],
     static_libs: [
         "libaudiopreprocessing",
-        "webrtc_audio_processing",
+        "libaudioutils",
     ],
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
+}
+
+cc_test {
+    name: "AudioPreProcessingTest",
+    defaults: ["libaudiopreprocessing-defaults"],
+    gtest: false,
+    srcs: ["PreProcessingTest.cpp"],
+    static_libs: [
+        "libaudiopreprocessing",
+        "libaudioutils",
     ],
-    target: {
-        darwin: {
-            enabled: false,
-        },
-    },
 }
 
 cc_test {
diff --git a/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp b/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp
new file mode 100644
index 0000000..07006a1
--- /dev/null
+++ b/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "EffectTestHelper.h"
+
+#include <getopt.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <tuple>
+#include <vector>
+
+#include <audio_effects/effect_aec.h>
+#include <audio_effects/effect_agc.h>
+#include <audio_effects/effect_agc2.h>
+#include <audio_effects/effect_ns.h>
+#include <log/log.h>
+
+constexpr effect_uuid_t kAGCUuid = {
+        0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+constexpr effect_uuid_t kAGC2Uuid = {
+        0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}};
+constexpr effect_uuid_t kAECUuid = {
+        0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+constexpr effect_uuid_t kNSUuid = {
+        0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+
+static bool isAGCEffect(const effect_uuid_t* uuid) {
+    return uuid == &kAGCUuid;
+}
+static bool isAGC2Effect(const effect_uuid_t* uuid) {
+    return uuid == &kAGC2Uuid;
+}
+static bool isAECEffect(const effect_uuid_t* uuid) {
+    return uuid == &kAECUuid;
+}
+static bool isNSEffect(const effect_uuid_t* uuid) {
+    return uuid == &kNSUuid;
+}
+
+constexpr int kAGCTargetLevels[] = {0, -300, -500, -1000, -3100};
+
+constexpr int kAGCCompLevels[] = {0, -300, -500, -1000, -9000};
+
+constexpr size_t kAGC2FixedDigitalGains[] = {0, 3, 10, 20, 49};
+
+constexpr size_t kAGC2AdaptDigitalLevelEstimators[] = {0, 1};
+
+constexpr size_t kAGC2ExtraSaturationMargins[] = {0, 3, 10, 20, 100};
+
+constexpr size_t kAECEchoDelays[] = {0, 250, 500};
+
+constexpr size_t kNSLevels[] = {0, 1, 2, 3};
+
+struct AGCParams {
+    int targetLevel;
+    int compLevel;
+};
+
+struct AGC2Params {
+    size_t fixedDigitalGain;
+    size_t adaptDigiLevelEstimator;
+    size_t extraSaturationMargin;
+};
+
+struct AECParams {
+    size_t echoDelay;
+};
+
+struct NSParams {
+    size_t level;
+};
+
+struct PreProcParams {
+    const effect_uuid_t* uuid;
+    union {
+        AGCParams agcParams;
+        AGC2Params agc2Params;
+        AECParams aecParams;
+        NSParams nsParams;
+    };
+};
+
+// Create a list of pre-processing parameters to be used for testing
+static const std::vector<PreProcParams> kPreProcParams = [] {
+    std::vector<PreProcParams> result;
+
+    for (const auto targetLevel : kAGCTargetLevels) {
+        for (const auto compLevel : kAGCCompLevels) {
+            AGCParams agcParams = {.targetLevel = targetLevel, .compLevel = compLevel};
+            PreProcParams params = {.uuid = &kAGCUuid, .agcParams = agcParams};
+            result.push_back(params);
+        }
+    }
+
+    for (const auto fixedDigitalGain : kAGC2FixedDigitalGains) {
+        for (const auto adaptDigiLevelEstimator : kAGC2AdaptDigitalLevelEstimators) {
+            for (const auto extraSaturationMargin : kAGC2ExtraSaturationMargins) {
+                AGC2Params agc2Params = {.fixedDigitalGain = fixedDigitalGain,
+                                         .adaptDigiLevelEstimator = adaptDigiLevelEstimator,
+                                         .extraSaturationMargin = extraSaturationMargin};
+                PreProcParams params = {.uuid = &kAGC2Uuid, .agc2Params = agc2Params};
+                result.push_back(params);
+            }
+        }
+    }
+
+    for (const auto echoDelay : kAECEchoDelays) {
+        AECParams aecParams = {.echoDelay = echoDelay};
+        PreProcParams params = {.uuid = &kAECUuid, .aecParams = aecParams};
+        result.push_back(params);
+    }
+
+    for (const auto level : kNSLevels) {
+        NSParams nsParams = {.level = level};
+        PreProcParams params = {.uuid = &kNSUuid, .nsParams = nsParams};
+        result.push_back(params);
+    }
+    return result;
+}();
+
+static const size_t kNumPreProcParams = std::size(kPreProcParams);
+
+void setPreProcParams(const effect_uuid_t* uuid, EffectTestHelper& effect, size_t paramIdx) {
+    const PreProcParams* params = &kPreProcParams[paramIdx];
+    if (isAGCEffect(uuid)) {
+        const AGCParams* agcParams = &params->agcParams;
+        ASSERT_NO_FATAL_FAILURE(effect.setParam(AGC_PARAM_TARGET_LEVEL, agcParams->targetLevel));
+        ASSERT_NO_FATAL_FAILURE(effect.setParam(AGC_PARAM_COMP_GAIN, agcParams->compLevel));
+    } else if (isAGC2Effect(uuid)) {
+        const AGC2Params* agc2Params = &params->agc2Params;
+        ASSERT_NO_FATAL_FAILURE(
+                effect.setParam(AGC2_PARAM_FIXED_DIGITAL_GAIN, agc2Params->fixedDigitalGain));
+        ASSERT_NO_FATAL_FAILURE(effect.setParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
+                                                agc2Params->adaptDigiLevelEstimator));
+        ASSERT_NO_FATAL_FAILURE(effect.setParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
+                                                agc2Params->extraSaturationMargin));
+    } else if (isAECEffect(uuid)) {
+        const AECParams* aecParams = &params->aecParams;
+        ASSERT_NO_FATAL_FAILURE(effect.setParam(AEC_PARAM_ECHO_DELAY, aecParams->echoDelay));
+    } else if (isNSEffect(uuid)) {
+        const NSParams* nsParams = &params->nsParams;
+        ASSERT_NO_FATAL_FAILURE(effect.setParam(NS_PARAM_LEVEL, nsParams->level));
+    }
+}
+
+typedef std::tuple<int, int, int, int> SingleEffectTestParam;
+class SingleEffectTest : public ::testing::TestWithParam<SingleEffectTestParam> {
+  public:
+    SingleEffectTest()
+        : mSampleRate(EffectTestHelper::kSampleRates[std::get<1>(GetParam())]),
+          mFrameCount(mSampleRate * EffectTestHelper::kTenMilliSecVal),
+          mLoopCount(EffectTestHelper::kLoopCounts[std::get<2>(GetParam())]),
+          mTotalFrameCount(mFrameCount * mLoopCount),
+          mChMask(EffectTestHelper::kChMasks[std::get<0>(GetParam())]),
+          mChannelCount(audio_channel_count_from_in_mask(mChMask)),
+          mParamIdx(std::get<3>(GetParam())),
+          mUuid(kPreProcParams[mParamIdx].uuid){};
+
+    const size_t mSampleRate;
+    const size_t mFrameCount;
+    const size_t mLoopCount;
+    const size_t mTotalFrameCount;
+    const size_t mChMask;
+    const size_t mChannelCount;
+    const size_t mParamIdx;
+    const effect_uuid_t* mUuid;
+};
+
+// Tests applying a single effect
+TEST_P(SingleEffectTest, SimpleProcess) {
+    SCOPED_TRACE(testing::Message() << " chMask: " << mChMask << " sampleRate: " << mSampleRate
+                                    << " loopCount: " << mLoopCount << " paramIdx " << mParamIdx);
+
+    EffectTestHelper effect(mUuid, mChMask, mSampleRate, mLoopCount);
+
+    ASSERT_NO_FATAL_FAILURE(effect.createEffect());
+    ASSERT_NO_FATAL_FAILURE(effect.setConfig(isAECEffect(mUuid)));
+    ASSERT_NO_FATAL_FAILURE(setPreProcParams(mUuid, effect, mParamIdx));
+
+    // Initialize input buffer with deterministic pseudo-random values
+    std::vector<int16_t> input(mTotalFrameCount * mChannelCount);
+    std::vector<int16_t> output(mTotalFrameCount * mChannelCount);
+    std::vector<int16_t> farInput(mTotalFrameCount * mChannelCount);
+    std::minstd_rand gen(mChMask);
+    std::uniform_int_distribution<int16_t> dis(INT16_MIN, INT16_MAX);
+    for (auto& in : input) {
+        in = dis(gen);
+    }
+    if (isAECEffect(mUuid)) {
+        for (auto& farIn : farInput) {
+            farIn = dis(gen);
+        }
+    }
+    ASSERT_NO_FATAL_FAILURE(effect.process(input.data(), output.data(), isAECEffect(mUuid)));
+    if (isAECEffect(mUuid)) {
+        ASSERT_NO_FATAL_FAILURE(effect.process_reverse(farInput.data(), output.data()));
+    }
+    ASSERT_NO_FATAL_FAILURE(effect.releaseEffect());
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        PreProcTestAll, SingleEffectTest,
+        ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumChMasks),
+                           ::testing::Range(0, (int)EffectTestHelper::kNumSampleRates),
+                           ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
+                           ::testing::Range(0, (int)kNumPreProcParams)));
+
+typedef std::tuple<int, int, int> SingleEffectComparisonTestParam;
+class SingleEffectComparisonTest
+    : public ::testing::TestWithParam<SingleEffectComparisonTestParam> {
+  public:
+    SingleEffectComparisonTest()
+        : mSampleRate(EffectTestHelper::kSampleRates[std::get<0>(GetParam())]),
+          mFrameCount(mSampleRate * EffectTestHelper::kTenMilliSecVal),
+          mLoopCount(EffectTestHelper::kLoopCounts[std::get<1>(GetParam())]),
+          mTotalFrameCount(mFrameCount * mLoopCount),
+          mParamIdx(std::get<2>(GetParam())),
+          mUuid(kPreProcParams[mParamIdx].uuid){};
+
+    const size_t mSampleRate;
+    const size_t mFrameCount;
+    const size_t mLoopCount;
+    const size_t mTotalFrameCount;
+    const size_t mParamIdx;
+    const effect_uuid_t* mUuid;
+};
+
+// Compares first channel in multi-channel output to mono output when same effect is applied
+TEST_P(SingleEffectComparisonTest, SimpleProcess) {
+    SCOPED_TRACE(testing::Message() << " sampleRate: " << mSampleRate
+                                    << " loopCount: " << mLoopCount << " paramIdx " << mParamIdx);
+
+    // Initialize mono input buffer with deterministic pseudo-random values
+    std::vector<int16_t> monoInput(mTotalFrameCount);
+    std::vector<int16_t> monoFarInput(mTotalFrameCount);
+
+    std::minstd_rand gen(mSampleRate);
+    std::uniform_int_distribution<int16_t> dis(INT16_MIN, INT16_MAX);
+    for (auto& in : monoInput) {
+        in = dis(gen);
+    }
+    if (isAECEffect(mUuid)) {
+        for (auto& farIn : monoFarInput) {
+            farIn = dis(gen);
+        }
+    }
+
+    // Apply effect on mono channel
+    EffectTestHelper monoEffect(mUuid, AUDIO_CHANNEL_INDEX_MASK_1, mSampleRate, mLoopCount);
+
+    ASSERT_NO_FATAL_FAILURE(monoEffect.createEffect());
+    ASSERT_NO_FATAL_FAILURE(monoEffect.setConfig(isAECEffect(mUuid)));
+    ASSERT_NO_FATAL_FAILURE(setPreProcParams(mUuid, monoEffect, mParamIdx));
+
+    std::vector<int16_t> monoOutput(mTotalFrameCount);
+    ASSERT_NO_FATAL_FAILURE(
+            monoEffect.process(monoInput.data(), monoOutput.data(), isAECEffect(mUuid)));
+    if (isAECEffect(mUuid)) {
+        ASSERT_NO_FATAL_FAILURE(monoEffect.process_reverse(monoFarInput.data(), monoOutput.data()));
+    }
+    ASSERT_NO_FATAL_FAILURE(monoEffect.releaseEffect());
+
+    for (size_t chMask : EffectTestHelper::kChMasks) {
+        size_t channelCount = audio_channel_count_from_in_mask(chMask);
+
+        EffectTestHelper testEffect(mUuid, chMask, mSampleRate, mLoopCount);
+
+        ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
+        ASSERT_NO_FATAL_FAILURE(testEffect.setConfig(isAECEffect(mUuid)));
+        ASSERT_NO_FATAL_FAILURE(setPreProcParams(mUuid, testEffect, mParamIdx));
+
+        std::vector<int16_t> testInput(mTotalFrameCount * channelCount);
+        std::vector<int16_t> testFarInput(mTotalFrameCount * channelCount);
+
+        // Repeat mono channel data to all the channels
+        // adjust_channels() zero fills channels > 2, hence can't be used here
+        for (size_t i = 0; i < mTotalFrameCount; ++i) {
+            auto* fpInput = &testInput[i * channelCount];
+            std::fill(fpInput, fpInput + channelCount, monoInput[i]);
+        }
+        if (isAECEffect(mUuid)) {
+            for (size_t i = 0; i < mTotalFrameCount; ++i) {
+                auto* fpFarInput = &testFarInput[i * channelCount];
+                std::fill(fpFarInput, fpFarInput + channelCount, monoFarInput[i]);
+            }
+        }
+
+        std::vector<int16_t> testOutput(mTotalFrameCount * channelCount);
+        ASSERT_NO_FATAL_FAILURE(
+                testEffect.process(testInput.data(), testOutput.data(), isAECEffect(mUuid)));
+        if (isAECEffect(mUuid)) {
+            ASSERT_NO_FATAL_FAILURE(
+                    testEffect.process_reverse(testFarInput.data(), testOutput.data()));
+        }
+        ASSERT_NO_FATAL_FAILURE(testEffect.releaseEffect());
+
+        // Adjust the test output to mono channel
+        std::vector<int16_t> monoTestOutput(mTotalFrameCount);
+        adjust_channels(testOutput.data(), channelCount, monoTestOutput.data(), FCC_1,
+                        sizeof(int16_t), mTotalFrameCount * sizeof(int16_t) * channelCount);
+
+        ASSERT_EQ(0, memcmp(monoOutput.data(), monoTestOutput.data(),
+                            mTotalFrameCount * sizeof(int16_t)))
+                << "Mono channel output does not match with reference output \n";
+    }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        PreProcTestAll, SingleEffectComparisonTest,
+        ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumSampleRates),
+                           ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
+                           ::testing::Range(0, (int)kNumPreProcParams)));
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    int status = RUN_ALL_TESTS();
+    ALOGV("Test result = %d", status);
+    return status;
+}
diff --git a/media/libeffects/preprocessing/tests/EffectTestHelper.cpp b/media/libeffects/preprocessing/tests/EffectTestHelper.cpp
new file mode 100644
index 0000000..79200b6
--- /dev/null
+++ b/media/libeffects/preprocessing/tests/EffectTestHelper.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "EffectTestHelper.h"
+extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
+
+void EffectTestHelper::createEffect() {
+    int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(mUuid, 1, 1, &mEffectHandle);
+    ASSERT_EQ(status, 0) << "create_effect returned an error " << status;
+}
+
+void EffectTestHelper::releaseEffect() {
+    int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(mEffectHandle);
+    ASSERT_EQ(status, 0) << "release_effect returned an error " << status;
+}
+
+void EffectTestHelper::setConfig(bool configReverse) {
+    effect_config_t config{};
+    config.inputCfg.samplingRate = config.outputCfg.samplingRate = mSampleRate;
+    config.inputCfg.channels = config.outputCfg.channels = mChMask;
+    config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+
+    int reply = 0;
+    uint32_t replySize = sizeof(reply);
+
+    int status = (*mEffectHandle)
+                         ->command(mEffectHandle, EFFECT_CMD_SET_CONFIG, sizeof(effect_config_t),
+                                   &config, &replySize, &reply);
+    ASSERT_EQ(status, 0) << "set_config returned an error " << status;
+    ASSERT_EQ(reply, 0) << "set_config reply non zero " << reply;
+
+    if (configReverse) {
+        int status = (*mEffectHandle)
+                             ->command(mEffectHandle, EFFECT_CMD_SET_CONFIG_REVERSE,
+                                       sizeof(effect_config_t), &config, &replySize, &reply);
+        ASSERT_EQ(status, 0) << "set_config_reverse returned an error " << status;
+        ASSERT_EQ(reply, 0) << "set_config_reverse reply non zero " << reply;
+    }
+
+    status = (*mEffectHandle)
+                     ->command(mEffectHandle, EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
+    ASSERT_EQ(status, 0) << "cmd_enable returned an error " << status;
+    ASSERT_EQ(reply, 0) << "cmd_enable reply non zero " << reply;
+}
+
+void EffectTestHelper::setParam(uint32_t type, uint32_t value) {
+    int reply = 0;
+    uint32_t replySize = sizeof(reply);
+    uint32_t paramData[2] = {type, value};
+    auto effectParam = (effect_param_t*)malloc(sizeof(effect_param_t) + sizeof(paramData));
+    memcpy(&effectParam->data[0], &paramData[0], sizeof(paramData));
+    effectParam->psize = sizeof(paramData[0]);
+    effectParam->vsize = sizeof(paramData[1]);
+    int status = (*mEffectHandle)
+                         ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
+                                   sizeof(effect_param_t) + sizeof(paramData), effectParam,
+                                   &replySize, &reply);
+    free(effectParam);
+    ASSERT_EQ(status, 0) << "set_param returned an error " << status;
+    ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
+}
+
+void EffectTestHelper::process(int16_t* input, int16_t* output, bool setAecEchoDelay) {
+    audio_buffer_t inBuffer = {.frameCount = mFrameCount, .s16 = input};
+    audio_buffer_t outBuffer = {.frameCount = mFrameCount, .s16 = output};
+    for (size_t i = 0; i < mLoopCount; i++) {
+        if (setAecEchoDelay) ASSERT_NO_FATAL_FAILURE(setParam(AEC_PARAM_ECHO_DELAY, kAECDelay));
+        int status = (*mEffectHandle)->process(mEffectHandle, &inBuffer, &outBuffer);
+        ASSERT_EQ(status, 0) << "process returned an error " << status;
+
+        inBuffer.s16 += mFrameCount * mChannelCount;
+        outBuffer.s16 += mFrameCount * mChannelCount;
+    }
+}
+
+void EffectTestHelper::process_reverse(int16_t* farInput, int16_t* output) {
+    audio_buffer_t farInBuffer = {.frameCount = mFrameCount, .s16 = farInput};
+    audio_buffer_t outBuffer = {.frameCount = mFrameCount, .s16 = output};
+    for (size_t i = 0; i < mLoopCount; i++) {
+        int status = (*mEffectHandle)->process_reverse(mEffectHandle, &farInBuffer, &outBuffer);
+        ASSERT_EQ(status, 0) << "process_reverse returned an error " << status;
+
+        farInBuffer.s16 += mFrameCount * mChannelCount;
+        outBuffer.s16 += mFrameCount * mChannelCount;
+    }
+}
diff --git a/media/libeffects/preprocessing/tests/EffectTestHelper.h b/media/libeffects/preprocessing/tests/EffectTestHelper.h
new file mode 100644
index 0000000..117cf7b
--- /dev/null
+++ b/media/libeffects/preprocessing/tests/EffectTestHelper.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <array>
+#include <audio_effects/effect_aec.h>
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
+#include <climits>
+#include <cstdlib>
+#include <gtest/gtest.h>
+#include <hardware/audio_effect.h>
+#include <log/log.h>
+#include <random>
+#include <stdint.h>
+#include <system/audio.h>
+#include <vector>
+
+template <typename T>
+static float computeSnr(const T* ref, const T* tst, size_t count) {
+    double signal{};
+    double noise{};
+
+    for (size_t i = 0; i < count; ++i) {
+        const double value(ref[i]);
+        const double diff(tst[i] - value);
+        signal += value * value;
+        noise += diff * diff;
+    }
+    // Initialized to large value to handle
+    // cases where ref and tst match exactly
+    float snr = FLT_MAX;
+    if (signal > 0.0f && noise > 0.0f) {
+        snr = 10.f * log10(signal / noise);
+    }
+    return snr;
+}
+
+class EffectTestHelper {
+  public:
+    EffectTestHelper(const effect_uuid_t* uuid, size_t chMask, size_t sampleRate, size_t loopCount)
+        : mUuid(uuid),
+          mChMask(chMask),
+          mChannelCount(audio_channel_count_from_in_mask(mChMask)),
+          mSampleRate(sampleRate),
+          mFrameCount(mSampleRate * kTenMilliSecVal),
+          mLoopCount(loopCount) {}
+    void createEffect();
+    void releaseEffect();
+    void setConfig(bool configReverse);
+    void setParam(uint32_t type, uint32_t val);
+    void process(int16_t* input, int16_t* output, bool setAecEchoDelay);
+    void process_reverse(int16_t* farInput, int16_t* output);
+
+    // Corresponds to SNR for 1 bit difference between two int16_t signals
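+    // (20 * log10(2^15) ~= 90.309 dB: full-scale amplitude over a 1-LSB error)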
+    static constexpr float kSNRThreshold = 90.308998;
+
+    static constexpr audio_channel_mask_t kChMasks[] = {
+            AUDIO_CHANNEL_IN_MONO,
+            AUDIO_CHANNEL_IN_STEREO,
+            AUDIO_CHANNEL_IN_FRONT_BACK,
+            AUDIO_CHANNEL_IN_6,
+            AUDIO_CHANNEL_IN_2POINT0POINT2,
+            AUDIO_CHANNEL_IN_2POINT1POINT2,
+            AUDIO_CHANNEL_IN_3POINT0POINT2,
+            AUDIO_CHANNEL_IN_3POINT1POINT2,
+            AUDIO_CHANNEL_IN_5POINT1,
+            AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO,
+            AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO,
+            AUDIO_CHANNEL_IN_VOICE_CALL_MONO,
+    };
+
+    static constexpr float kTenMilliSecVal = 0.01;
+
+    static constexpr size_t kNumChMasks = std::size(kChMasks);
+
+    static constexpr size_t kSampleRates[] = {8000,  11025, 12000, 16000, 22050,
+                                              24000, 32000, 44100, 48000};
+
+    static constexpr size_t kNumSampleRates = std::size(kSampleRates);
+
+    static constexpr size_t kLoopCounts[] = {1, 4};
+
+    static constexpr size_t kNumLoopCounts = std::size(kLoopCounts);
+
+    static constexpr size_t kAECDelay = 0;
+
+  private:
+    const effect_uuid_t* mUuid;
+    const size_t mChMask;
+    const size_t mChannelCount;
+    const size_t mSampleRate;
+    const size_t mFrameCount;
+    const size_t mLoopCount;
+    effect_handle_t mEffectHandle{};
+};
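
The gtest files that drive this helper are presumably added elsewhere in this change and are not
part of this excerpt; as a rough sketch of the intended call sequence (kEffectUuid and the
160-frame buffers below are placeholders, not identifiers from the patch), a test might look like:

TEST(PreProcEffectTest, SimpleProcessSketch) {
    // Mono input at 16 kHz, one 10 ms block (160 frames) per loop; kEffectUuid stands in
    // for one of the preprocessing effect UUIDs (AEC/AGC/NS).
    EffectTestHelper effect(kEffectUuid, AUDIO_CHANNEL_IN_MONO, 16000 /* sampleRate */,
                            1 /* loopCount */);
    ASSERT_NO_FATAL_FAILURE(effect.createEffect());
    ASSERT_NO_FATAL_FAILURE(effect.setConfig(false /* configReverse */));
    std::vector<int16_t> input(160), output(160);
    ASSERT_NO_FATAL_FAILURE(effect.process(input.data(), output.data(),
                                           false /* setAecEchoDelay */));
    ASSERT_NO_FATAL_FAILURE(effect.releaseEffect());
}
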
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index e0025fe..3bd93f8 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -451,8 +451,8 @@
         }
         audio_buffer_t inputBuffer, outputBuffer;
         audio_buffer_t farInBuffer{};
-        inputBuffer.frameCount = samplesRead;
-        outputBuffer.frameCount = samplesRead;
+        inputBuffer.frameCount = frameLength;
+        outputBuffer.frameCount = frameLength;
         inputBuffer.s16 = in.data();
         outputBuffer.s16 = out.data();
 
@@ -472,7 +472,7 @@
                 }
             }
 
-            farInBuffer.frameCount = samplesRead;
+            farInBuffer.frameCount = frameLength;
             farInBuffer.s16 = farIn.data();
         }
 
@@ -519,6 +519,7 @@
         }
         frameCounter += frameLength;
     }
+    printf("frameCounter: [%d]\n", frameCounter);
     // Release all the effect handles created
     for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
         if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle[i]);
diff --git a/media/libeffects/preprocessing/tests/build_and_run_all_unit_tests.sh b/media/libeffects/preprocessing/tests/build_and_run_all_unit_tests.sh
index 942f2ec..35da13e 100755
--- a/media/libeffects/preprocessing/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/preprocessing/tests/build_and_run_all_unit_tests.sh
@@ -59,9 +59,13 @@
 
 fs_arr=(
     8000
+    11025
+    12000
     16000
+    22050
     24000
     32000
+    44100
     48000
 )
 
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index 74ddce4..413f049 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -29,24 +29,37 @@
 
 namespace android {
 
-static AMediaFormat* mergeMediaFormats(AMediaFormat* base, AMediaFormat* overlay) {
-    if (base == nullptr || overlay == nullptr) {
+static std::shared_ptr<AMediaFormat> createVideoTrackFormat(AMediaFormat* srcFormat,
+                                                            AMediaFormat* options) {
+    if (srcFormat == nullptr || options == nullptr) {
         LOG(ERROR) << "Cannot merge null formats";
         return nullptr;
     }
 
-    AMediaFormat* format = AMediaFormat_new();
-    if (AMediaFormat_copy(format, base) != AMEDIA_OK) {
-        AMediaFormat_delete(format);
-        return nullptr;
+    // ------- Define parameters to copy from the source track format -------
+    std::vector<AMediaFormatUtils::EntryCopier> srcParamsToCopy{
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_MIME, String),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_DURATION, Int64),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_WIDTH, Int32),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_HEIGHT, Int32),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_FRAME_RATE, Int32),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_RANGE, Int32),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_STANDARD, Int32),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_TRANSFER, Int32),
+    };
+
+    // If the destination codec is the same as the source codec, we can preserve profile and level
+    // from the source track as default values. Otherwise leave them unspecified.
+    const char *srcMime, *dstMime;
+    AMediaFormat_getString(srcFormat, AMEDIAFORMAT_KEY_MIME, &srcMime);
+    if (!AMediaFormat_getString(options, AMEDIAFORMAT_KEY_MIME, &dstMime) ||
+        strcmp(srcMime, dstMime) == 0) {
+        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, Int32));
+        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, Int32));
     }
 
-    // Note: AMediaFormat does not expose a function for appending values from another format or for
-    // iterating over all values and keys in a format. Instead we define a static list of known keys
-    // along with their value types and copy the ones that are present. A better solution would be
-    // to either implement required functions in NDK or to parse the overlay format's string
-    // representation and copy all existing keys.
-    static const AMediaFormatUtils::EntryCopier kSupportedFormatEntries[] = {
+    // ------- Define parameters to copy from the caller's options -------
+    static const std::vector<AMediaFormatUtils::EntryCopier> kSupportedOptions{
             ENTRY_COPIER(AMEDIAFORMAT_KEY_MIME, String),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_DURATION, Int64),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_WIDTH, Int32),
@@ -54,7 +67,6 @@
             ENTRY_COPIER(AMEDIAFORMAT_KEY_BIT_RATE, Int32),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, Int32),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, Int32),
-            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_FORMAT, Int32),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_RANGE, Int32),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_STANDARD, Int32),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_TRANSFER, Int32),
@@ -63,10 +75,12 @@
             ENTRY_COPIER(AMEDIAFORMAT_KEY_PRIORITY, Int32),
             ENTRY_COPIER2(AMEDIAFORMAT_KEY_OPERATING_RATE, Float, Int32),
     };
-    const size_t entryCount = sizeof(kSupportedFormatEntries) / sizeof(kSupportedFormatEntries[0]);
 
-    AMediaFormatUtils::CopyFormatEntries(overlay, format, kSupportedFormatEntries, entryCount);
-    return format;
+    // ------- Copy parameters from source and options to the destination -------
+    auto trackFormat = std::shared_ptr<AMediaFormat>(AMediaFormat_new(), &AMediaFormat_delete);
+    AMediaFormatUtils::CopyFormatEntries(srcFormat, trackFormat.get(), srcParamsToCopy);
+    AMediaFormatUtils::CopyFormatEntries(options, trackFormat.get(), kSupportedOptions);
+    return trackFormat;
 }
 
 void MediaTranscoder::onThreadFinished(const void* thread, media_status_t threadStatus,
@@ -270,7 +284,8 @@
     return trackFormats;
 }
 
-media_status_t MediaTranscoder::configureTrackFormat(size_t trackIndex, AMediaFormat* trackFormat) {
+media_status_t MediaTranscoder::configureTrackFormat(size_t trackIndex,
+                                                     AMediaFormat* destinationOptions) {
     if (mSampleReader == nullptr) {
         LOG(ERROR) << "Source must be configured before tracks";
         return AMEDIA_ERROR_INVALID_OPERATION;
@@ -281,14 +296,15 @@
     }
 
     std::shared_ptr<MediaTrackTranscoder> transcoder;
-    std::shared_ptr<AMediaFormat> format;
+    std::shared_ptr<AMediaFormat> trackFormat;
 
-    if (trackFormat == nullptr) {
+    if (destinationOptions == nullptr) {
         transcoder = std::make_shared<PassthroughTrackTranscoder>(shared_from_this());
     } else {
+        AMediaFormat* srcTrackFormat = mSourceTrackFormats[trackIndex].get();
+
         const char* srcMime = nullptr;
-        if (!AMediaFormat_getString(mSourceTrackFormats[trackIndex].get(), AMEDIAFORMAT_KEY_MIME,
-                                    &srcMime)) {
+        if (!AMediaFormat_getString(srcTrackFormat, AMEDIAFORMAT_KEY_MIME, &srcMime)) {
             LOG(ERROR) << "Source track #" << trackIndex << " has no mime type";
             return AMEDIA_ERROR_MALFORMED;
         }
@@ -301,7 +317,7 @@
         }
 
         const char* dstMime = nullptr;
-        if (AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &dstMime)) {
+        if (AMediaFormat_getString(destinationOptions, AMEDIAFORMAT_KEY_MIME, &dstMime)) {
             if (strncmp(dstMime, "video/", 6) != 0) {
                 LOG(ERROR) << "Unable to convert media types for track #" << trackIndex << ", from "
                            << srcMime << " to " << dstMime;
@@ -311,14 +327,11 @@
 
         transcoder = VideoTrackTranscoder::create(shared_from_this(), mPid, mUid);
 
-        AMediaFormat* mergedFormat =
-                mergeMediaFormats(mSourceTrackFormats[trackIndex].get(), trackFormat);
-        if (mergedFormat == nullptr) {
-            LOG(ERROR) << "Unable to merge source and destination formats";
+        trackFormat = createVideoTrackFormat(srcTrackFormat, destinationOptions);
+        if (trackFormat == nullptr) {
+            LOG(ERROR) << "Unable to create video track format";
             return AMEDIA_ERROR_UNKNOWN;
         }
-
-        format = std::shared_ptr<AMediaFormat>(mergedFormat, &AMediaFormat_delete);
     }
 
     media_status_t status = mSampleReader->selectTrack(trackIndex);
@@ -327,7 +340,7 @@
         return status;
     }
 
-    status = transcoder->configure(mSampleReader, trackIndex, format);
+    status = transcoder->configure(mSampleReader, trackIndex, trackFormat);
     if (status != AMEDIA_OK) {
         LOG(ERROR) << "Configure track transcoder for track #" << trackIndex << " returned error "
                    << status;
diff --git a/media/libmediatranscoding/transcoder/NdkCommon.cpp b/media/libmediatranscoding/transcoder/NdkCommon.cpp
index fb909b2..2d85df7 100644
--- a/media/libmediatranscoding/transcoder/NdkCommon.cpp
+++ b/media/libmediatranscoding/transcoder/NdkCommon.cpp
@@ -60,19 +60,19 @@
 DEFINE_FORMAT_VALUE_COPY_FUNC(int32_t, Int32);
 DEFINE_FORMAT_VALUE_COPY_FUNC(float, Float);
 
-void CopyFormatEntries(AMediaFormat* from, AMediaFormat* to, const EntryCopier* entries,
-                       size_t entryCount) {
+void CopyFormatEntries(AMediaFormat* from, AMediaFormat* to,
+                       const std::vector<EntryCopier>& entries) {
     if (from == nullptr || to == nullptr) {
         LOG(ERROR) << "Cannot copy null formats";
         return;
-    } else if (entries == nullptr || entryCount < 1) {
+    } else if (entries.empty()) {
         LOG(WARNING) << "No entries to copy";
         return;
     }
 
-    for (size_t i = 0; i < entryCount; ++i) {
-        if (!entries[i].copy(entries[i].key, from, to) && entries[i].copy2 != nullptr) {
-            entries[i].copy2(entries[i].key, from, to);
+    for (auto& entry : entries) {
+        if (!entry.copy(entry.key, from, to) && entry.copy2 != nullptr) {
+            entry.copy2(entry.key, from, to);
         }
     }
 }
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index ab08d73..4405180 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -147,7 +147,7 @@
         if (auto transcoder = wrapper->getTranscoder()) {
             const bool isDecoder = codec == transcoder->mDecoder;
             const char* kCodecName = (isDecoder ? "Decoder" : "Encoder");
-            LOG(DEBUG) << kCodecName << " format changed: " << AMediaFormat_toString(format);
+            LOG(INFO) << kCodecName << " format changed: " << AMediaFormat_toString(format);
             transcoder->mCodecMessageQueue.push([transcoder, format, isDecoder] {
                 transcoder->updateTrackFormat(format, isDecoder);
             });
@@ -280,7 +280,7 @@
     }
     mEncoder = std::make_shared<CodecWrapper>(encoder, shared_from_this());
 
-    LOG(DEBUG) << "Configuring encoder with: " << AMediaFormat_toString(mDestinationFormat.get());
+    LOG(INFO) << "Configuring encoder with: " << AMediaFormat_toString(mDestinationFormat.get());
     status = AMediaCodec_configure(mEncoder->getCodec(), mDestinationFormat.get(),
                                    NULL /* surface */, NULL /* crypto */,
                                    AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
@@ -332,15 +332,13 @@
     AMediaFormat_setInt32(decoderFormat.get(), TBD_AMEDIACODEC_PARAMETER_KEY_ALLOW_FRAME_DROP, 0);
 
     // Copy over configurations that apply to both encoder and decoder.
-    static const EntryCopier kEncoderEntriesToCopy[] = {
+    static const std::vector<EntryCopier> kEncoderEntriesToCopy{
             ENTRY_COPIER2(AMEDIAFORMAT_KEY_OPERATING_RATE, Float, Int32),
             ENTRY_COPIER(AMEDIAFORMAT_KEY_PRIORITY, Int32),
     };
-    const size_t entryCount = sizeof(kEncoderEntriesToCopy) / sizeof(kEncoderEntriesToCopy[0]);
-    CopyFormatEntries(mDestinationFormat.get(), decoderFormat.get(), kEncoderEntriesToCopy,
-                      entryCount);
+    CopyFormatEntries(mDestinationFormat.get(), decoderFormat.get(), kEncoderEntriesToCopy);
 
-    LOG(DEBUG) << "Configuring decoder with: " << AMediaFormat_toString(decoderFormat.get());
+    LOG(INFO) << "Configuring decoder with: " << AMediaFormat_toString(decoderFormat.get());
     status = AMediaCodec_configure(mDecoder, decoderFormat.get(), mSurface, NULL /* crypto */,
                                    0 /* flags */);
     if (status != AMEDIA_OK) {
@@ -487,9 +485,6 @@
         onOutputSampleAvailable(sample);
 
         mLastSampleWasSync = sample->info.flags & SAMPLE_FLAG_SYNC_SAMPLE;
-    } else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
-        AMediaFormat* newFormat = AMediaCodec_getOutputFormat(mEncoder->getCodec());
-        LOG(DEBUG) << "Encoder output format changed: " << AMediaFormat_toString(newFormat);
     }
 
     if (bufferInfo.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
@@ -509,15 +504,14 @@
 
 void VideoTrackTranscoder::updateTrackFormat(AMediaFormat* outputFormat, bool fromDecoder) {
     if (fromDecoder) {
-        static const AMediaFormatUtils::EntryCopier kValuesToCopy[] = {
+        static const std::vector<AMediaFormatUtils::EntryCopier> kValuesToCopy{
                 ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_RANGE, Int32),
                 ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_STANDARD, Int32),
                 ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_TRANSFER, Int32),
         };
         AMediaFormat* params = AMediaFormat_new();
         if (params != nullptr) {
-            AMediaFormatUtils::CopyFormatEntries(outputFormat, params, kValuesToCopy,
-                                                 std::size(kValuesToCopy));
+            AMediaFormatUtils::CopyFormatEntries(outputFormat, params, kValuesToCopy);
             if (AMediaCodec_setParameters(mEncoder->getCodec(), params) != AMEDIA_OK) {
                 LOG(WARNING) << "Unable to update encoder with color information";
             }
@@ -589,7 +583,7 @@
     // TODO: transfer other fields as required.
 
     mActualOutputFormat = std::shared_ptr<AMediaFormat>(formatCopy, &AMediaFormat_delete);
-    LOG(DEBUG) << "Actual output format: " << AMediaFormat_toString(formatCopy);
+    LOG(INFO) << "Actual output format: " << AMediaFormat_toString(formatCopy);
 
     notifyTrackFormatAvailable();
 }
diff --git a/media/libmediatranscoding/transcoder/include/media/NdkCommon.h b/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
index a7ed6a7..c5547c6 100644
--- a/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
+++ b/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
@@ -19,6 +19,8 @@
 
 #include <media/NdkMediaFormat.h>
 
+#include <vector>
+
 extern const char* AMEDIA_MIMETYPE_VIDEO_VP8;
 extern const char* AMEDIA_MIMETYPE_VIDEO_VP9;
 extern const char* AMEDIA_MIMETYPE_VIDEO_AV1;
@@ -82,8 +84,8 @@
 bool CopyFormatEntryInt32(const char* key, AMediaFormat* from, AMediaFormat* to);
 bool CopyFormatEntryFloat(const char* key, AMediaFormat* from, AMediaFormat* to);
 
-void CopyFormatEntries(AMediaFormat* from, AMediaFormat* to, const EntryCopier* entries,
-                       size_t entryCount);
+void CopyFormatEntries(AMediaFormat* from, AMediaFormat* to,
+                       const std::vector<EntryCopier>& entries);
 
 bool SetDefaultFormatValueFloat(const char* key, AMediaFormat* format, float value);
 bool SetDefaultFormatValueInt32(const char* key, AMediaFormat* format, int32_t value);
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index a78e6d2..01190b5 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -644,6 +644,10 @@
                 0,
                 dstBpp(),
                 mCaptureLayer != nullptr /*allocRotated*/);
+        if (frameMem == nullptr) {
+            return NO_MEMORY;
+        }
+
         mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
 
         setFrame(frameMem);
@@ -886,6 +890,11 @@
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
                 trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
+
+        if (frameMem == nullptr) {
+            return NO_MEMORY;
+        }
+
         mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
 
         setFrame(frameMem);
diff --git a/media/tests/SampleVideoEncoder/app/Android.bp b/media/tests/SampleVideoEncoder/app/Android.bp
index 35fe0d8..3a66955 100644
--- a/media/tests/SampleVideoEncoder/app/Android.bp
+++ b/media/tests/SampleVideoEncoder/app/Android.bp
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
 android_app {
     name: "SampleVideoEncoder",
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
index e6eef24..ab33b38 100644
--- a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -44,7 +44,7 @@
 
     bool equals(const sp<PolicyAudioPort> &right) const
     {
-        return getTagName() == right->getTagName();
+        return right != 0 && getTagName() == right->getTagName();
     }
 
     virtual sp<AudioPort> asAudioPort() const = 0;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index c8e4e76..866417e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -39,7 +39,7 @@
 bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
                                const sp<PolicyAudioPort> &dstPort) const
 {
-    if (mSink == 0 || dstPort == 0 || !dstPort->equals(mSink)) {
+    if (mSink == 0 || srcPort == 0 || dstPort == 0 || !dstPort->equals(mSink)) {
         return false;
     }
     ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().c_str());
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 35a06d8..24d4611 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2611,6 +2611,7 @@
         config.input_is_multi_resolution = mIsInputStreamMultiResolution;
     }
 
+    mGroupIdPhysicalCameraMap.clear();
     for (size_t i = 0; i < mOutputStreams.size(); i++) {
 
         // Don't configure bidi streams twice, nor add them twice to the list
@@ -2644,6 +2645,12 @@
                         __FUNCTION__, outputStream->data_space);
             }
         }
+
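+        // Record which physical cameras back each multi-resolution stream group.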
+        if (mOutputStreams[i]->isMultiResolution()) {
+            int32_t streamGroupId = mOutputStreams[i]->getHalStreamGroupId();
+            const String8& physicalCameraId = mOutputStreams[i]->getPhysicalCameraId();
+            mGroupIdPhysicalCameraMap[streamGroupId].insert(physicalCameraId);
+        }
     }
 
     config.streams = streams.editArray();
@@ -2714,7 +2721,8 @@
     // Request thread needs to know to avoid using repeat-last-settings protocol
     // across configure_streams() calls
     if (notifyRequestThread) {
-        mRequestThread->configurationComplete(mIsConstrainedHighSpeedConfiguration, sessionParams);
+        mRequestThread->configurationComplete(mIsConstrainedHighSpeedConfiguration,
+                sessionParams, mGroupIdPhysicalCameraMap);
     }
 
     char value[PROPERTY_VALUE_MAX];
@@ -2887,8 +2895,9 @@
 status_t Camera3Device::registerInFlight(uint32_t frameNumber,
         int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
         bool hasAppCallback, nsecs_t maxExpectedDuration,
-        std::set<String8>& physicalCameraIds, bool isStillCapture,
-        bool isZslCapture, bool rotateAndCropAuto, const std::set<std::string>& cameraIdsWithZoom,
+        const std::set<std::set<String8>>& physicalCameraIds,
+        bool isStillCapture, bool isZslCapture, bool rotateAndCropAuto,
+        const std::set<std::string>& cameraIdsWithZoom,
         const SurfaceMap& outputSurfaces, nsecs_t requestTimeNs) {
     ATRACE_CALL();
     std::lock_guard<std::mutex> l(mInFlightLock);
@@ -3962,11 +3971,13 @@
 }
 
 void Camera3Device::RequestThread::configurationComplete(bool isConstrainedHighSpeed,
-        const CameraMetadata& sessionParams) {
+        const CameraMetadata& sessionParams,
+        const std::map<int32_t, std::set<String8>>& groupIdPhysicalCameraMap) {
     ATRACE_CALL();
     Mutex::Autolock l(mRequestLock);
     mReconfigured = true;
     mLatestSessionParams = sessionParams;
+    mGroupIdPhysicalCameraMap = groupIdPhysicalCameraMap;
     // Prepare video stream for high speed recording.
     mPrepareVideoStream = isConstrainedHighSpeed;
     mConstrainedMode = isConstrainedHighSpeed;
@@ -4725,7 +4736,7 @@
         outputBuffers->insertAt(camera_stream_buffer_t(), 0,
                 captureRequest->mOutputStreams.size());
         halRequest->output_buffers = outputBuffers->array();
-        std::set<String8> requestedPhysicalCameras;
+        std::set<std::set<String8>> requestedPhysicalCameras;
 
         sp<Camera3Device> parent = mParent.promote();
         if (parent == NULL) {
@@ -4820,8 +4831,11 @@
             }
 
             String8 physicalCameraId = outputStream->getPhysicalCameraId();
-            if (!physicalCameraId.isEmpty()) {
-                requestedPhysicalCameras.insert(physicalCameraId);
+            int32_t streamGroupId = outputStream->getHalStreamGroupId();
+            if (streamGroupId != -1 && mGroupIdPhysicalCameraMap.count(streamGroupId) == 1) {
+                requestedPhysicalCameras.insert(mGroupIdPhysicalCameraMap[streamGroupId]);
+            } else if (!physicalCameraId.isEmpty()) {
+                requestedPhysicalCameras.insert(std::set<String8>({physicalCameraId}));
             }
             halRequest->num_output_buffers++;
         }
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 018dbe5..855d2e3 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -497,6 +497,8 @@
     sp<camera3::Camera3Stream> mInputStream;
     bool                       mIsInputStreamMultiResolution;
     SessionStatsBuilder        mSessionStatsBuilder;
+    // Map from stream group ID to physical cameras backing the stream group
+    std::map<int32_t, std::set<String8>> mGroupIdPhysicalCameraMap;
 
     int                        mNextStreamId;
     bool                       mNeedConfig;
@@ -800,7 +802,8 @@
          * Call after stream (re)-configuration is completed.
          */
         void     configurationComplete(bool isConstrainedHighSpeed,
-                const CameraMetadata& sessionParams);
+                const CameraMetadata& sessionParams,
+                const std::map<int32_t, std::set<String8>>& groupIdPhysicalCameraMap);
 
         /**
          * Set or clear the list of repeating requests. Does not block
@@ -1057,6 +1060,8 @@
         Vector<int32_t>    mSessionParamKeys;
         CameraMetadata     mLatestSessionParams;
 
+        std::map<int32_t, std::set<String8>> mGroupIdPhysicalCameraMap;
+
         const bool         mUseHalBufManager;
     };
     sp<RequestThread> mRequestThread;
@@ -1076,7 +1081,8 @@
 
     status_t registerInFlight(uint32_t frameNumber,
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
-            bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds,
+            bool callback, nsecs_t maxExpectedDuration,
+            const std::set<std::set<String8>>& physicalCameraIds,
             bool isStillCapture, bool isZslCapture, bool rotateAndCropAuto,
             const std::set<std::string>& cameraIdsWithZoom, const SurfaceMap& outputSurfaces,
             nsecs_t requestTimeNs);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 384c2c6..9f225d0 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -484,6 +484,20 @@
     states.inflightIntf.checkInflightMapLengthLocked();
 }
 
+// Erase the subset of physicalCameraIds that contains id
+bool erasePhysicalCameraIdSet(
+        std::set<std::set<String8>>& physicalCameraIds, const String8& id) {
+    bool found = false;
+    for (auto iter = physicalCameraIds.begin(); iter != physicalCameraIds.end(); iter++) {
+        if (iter->count(id) == 1) {
+            physicalCameraIds.erase(iter);
+            found = true;
+            break;
+        }
+    }
+    return found;
+}
+
 void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result) {
     ATRACE_CALL();
 
@@ -583,12 +597,10 @@
             }
             for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
                 String8 physicalId(result->physcam_ids[i]);
-                std::set<String8>::iterator cameraIdIter =
-                        request.physicalCameraIds.find(physicalId);
-                if (cameraIdIter != request.physicalCameraIds.end()) {
-                    request.physicalCameraIds.erase(cameraIdIter);
-                } else {
-                    SET_ERR("Total result for frame %d has already returned for camera %s",
+                bool validPhysicalCameraMetadata =
+                        erasePhysicalCameraIdSet(request.physicalCameraIds, physicalId);
+                if (!validPhysicalCameraMetadata) {
+                    SET_ERR("Unexpected total result for frame %d camera %s",
                             frameNumber, physicalId.c_str());
                     return;
                 }
@@ -1083,14 +1095,14 @@
                             errorCode) {
                         if (physicalCameraId.size() > 0) {
                             String8 cameraId(physicalCameraId);
-                            auto iter = r.physicalCameraIds.find(cameraId);
-                            if (iter == r.physicalCameraIds.end()) {
+                            bool validPhysicalCameraId =
+                                    erasePhysicalCameraIdSet(r.physicalCameraIds, cameraId);
+                            if (!validPhysicalCameraId) {
                                 ALOGE("%s: Reported result failure for physical camera device: %s "
                                         " which is not part of the respective request!",
                                         __FUNCTION__, cameraId.string());
                                 break;
                             }
-                            r.physicalCameraIds.erase(iter);
                             resultExtras.errorPhysicalCameraId = physicalCameraId;
                             physicalDeviceResultError = true;
                         }
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index a567cb4..7e6a077 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -135,6 +135,16 @@
     virtual int      getStreamSetId() const = 0;
 
     /**
+     * Is this stream part of a multi-resolution stream set
+     */
+    virtual bool     isMultiResolution() const = 0;
+
+    /**
+     * Get the HAL stream group id for a multi-resolution stream set
+     */
+    virtual int      getHalStreamGroupId() const = 0;
+
+    /**
      * Get the stream's dimensions and format
      */
     virtual uint32_t getWidth() const = 0;
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index e3aaf44..523a2c7 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -96,7 +96,10 @@
     ERROR_BUF_STRATEGY errorBufStrategy;
 
     // The physical camera ids being requested.
-    std::set<String8> physicalCameraIds;
+    // For a request on a physical camera stream, the inner set contains that one id.
+    // For a request on a stream group containing physical camera streams, the inner
+    // set contains the physical camera ids backing all streams in the group.
+    std::set<std::set<String8>> physicalCameraIds;
 
     // Map of physicalCameraId <-> Metadata
     std::vector<PhysicalCaptureResultInfo> physicalMetadatas;
@@ -142,7 +145,7 @@
 
     InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
             bool hasAppCallback, nsecs_t maxDuration,
-            const std::set<String8>& physicalCameraIdSet, bool isStillCapture,
+            const std::set<std::set<String8>>& physicalCameraIdSet, bool isStillCapture,
             bool isZslCapture, bool rotateAndCropAuto, const std::set<std::string>& idsWithZoom,
             nsecs_t requestNs, const SurfaceMap& outSurfaces = SurfaceMap{}) :
             shutterTimestamp(0),
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index f7d1f6a..443d339 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -165,10 +165,18 @@
         "libmediautils",
         "libmemunreachable",
         "libprotobuf-cpp-lite",
+        "libstagefright_foundation",
         "libstatslog",
+        "libstatspull",
+        "libstatssocket",
         "libutils",
     ],
 
+    export_shared_lib_headers: [
+        "libstatspull",
+        "libstatssocket",
+    ],
+
     static_libs: [
         "libplatformprotos",
     ],
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 1a0f6a4..bfc722e 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include "MediaMetricsService.h"
+#include "iface_statsd.h"
 
 #include <pwd.h> //getpwuid
 
@@ -30,6 +31,9 @@
 #include <mediautils/MemoryLeakTrackUtil.h>
 #include <memunreachable/memunreachable.h>
 #include <private/android_filesystem_config.h> // UID
+#include <statslog.h>
+
+#include <set>
 
 namespace android {
 
@@ -200,7 +204,6 @@
 
     (void)mAudioAnalytics.submit(sitem, isTrusted);
 
-    extern bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item);
     (void)dump2Statsd(sitem);  // failure should be logged in function.
     saveItem(sitem);
     return NO_ERROR;
@@ -440,6 +443,10 @@
     std::lock_guard _l(mLock);
     // we assume the items are roughly in time order.
     mItems.emplace_back(item);
+    if (isPullable(item->getKey())) {
+        registerStatsdCallbacksIfNeeded();
+        mPullableItems[item->getKey()].emplace_back(item);
+    }
     ++mItemsFinalized;
     if (expirations(item)
             && (!mExpireFuture.valid()
@@ -486,4 +493,57 @@
     return false;
 }
 
+void MediaMetricsService::registerStatsdCallbacksIfNeeded()
+{
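+    // test_and_set() returns the previously stored flag, so only the first caller
+    // registers the pull callback.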
+    if (mStatsdRegistered.test_and_set()) {
+        return;
+    }
+    auto tag = android::util::MEDIA_DRM_ACTIVITY_INFO;
+    auto cb = MediaMetricsService::pullAtomCallback;
+    AStatsManager_setPullAtomCallback(tag, /* metadata */ nullptr, cb, this);
+}
+
+/* static */
+bool MediaMetricsService::isPullable(const std::string &key)
+{
+    static const std::set<std::string> pullableKeys{
+        "mediadrm",
+    };
+    return pullableKeys.count(key);
+}
+
+/* static */
+std::string MediaMetricsService::atomTagToKey(int32_t atomTag)
+{
+    switch (atomTag) {
+    case android::util::MEDIA_DRM_ACTIVITY_INFO:
+        return "mediadrm";
+    }
+    return {};
+}
+
+/* static */
+AStatsManager_PullAtomCallbackReturn MediaMetricsService::pullAtomCallback(
+        int32_t atomTag, AStatsEventList* data, void* cookie)
+{
+    MediaMetricsService* svc = reinterpret_cast<MediaMetricsService*>(cookie);
+    return svc->pullItems(atomTag, data);
+}
+
+AStatsManager_PullAtomCallbackReturn MediaMetricsService::pullItems(
+        int32_t atomTag, AStatsEventList* data)
+{
+    const std::string key(atomTagToKey(atomTag));
+    if (key.empty()) {
+        return AStatsManager_PULL_SKIP;
+    }
+    std::lock_guard _l(mLock);
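+    // Entries whose items have already expired from the log resolve to null weak_ptrs
+    // and are skipped.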
+    for (auto &item : mPullableItems[key]) {
+        if (const auto sitem = item.lock()) {
+            dump2Statsd(sitem, data);
+        }
+    }
+    mPullableItems[key].clear();
+    return AStatsManager_PULL_SUCCESS;
+}
 } // namespace android
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/MediaMetricsService.h
index bcae397..8bc8019 100644
--- a/services/mediametrics/MediaMetricsService.h
+++ b/services/mediametrics/MediaMetricsService.h
@@ -26,6 +26,7 @@
 #include <android-base/thread_annotations.h>
 #include <android/media/BnMediaMetricsService.h>
 #include <mediautils/ServiceUtilities.h>
+#include <stats_pull_atom_callback.h>
 #include <utils/String8.h>
 
 #include "AudioAnalytics.h"
@@ -102,6 +103,15 @@
     void dumpQueue(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
     void dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
 
+    // support statsd pulled atoms
+    static bool isPullable(const std::string &key);
+    static std::string atomTagToKey(int32_t atomTag);
+    static AStatsManager_PullAtomCallbackReturn pullAtomCallback(
+            int32_t atomTag, AStatsEventList* data, void* cookie);
+    AStatsManager_PullAtomCallbackReturn pullItems(int32_t atomTag, AStatsEventList* data);
+    void registerStatsdCallbacksIfNeeded();
+    std::atomic_flag mStatsdRegistered = ATOMIC_FLAG_INIT;
+
     // The following variables accessed without mLock
 
     // limit how many records we'll retain
@@ -130,6 +140,12 @@
     // TODO: Make separate class, use segmented queue, write lock only end.
     // Note: Another analytics module might have ownership of an item longer than the log.
     std::deque<std::shared_ptr<const mediametrics::Item>> mItems GUARDED_BY(mLock);
+
+    // Queues per item key, pending to be pulled by statsd.
+    // Use weak_ptr such that a pullable item can still expire.
+    using ItemKey = std::string;
+    using WeakItemQueue = std::deque<std::weak_ptr<const mediametrics::Item>>;
+    std::unordered_map<ItemKey, WeakItemQueue> mPullableItems GUARDED_BY(mLock);
 };
 
 } // namespace android
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
index d75ded2..b03e518 100644
--- a/services/mediametrics/fuzzer/Android.bp
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -50,7 +50,10 @@
         "libmemunreachable",
         "libprotobuf-cpp-lite",
         "libstagefright",
+        "libstagefright_foundation",
         "libstatslog",
+        "libstatspull",
+        "libstatssocket",
         "libutils",
         "mediametricsservice-aidl-cpp",
     ],
diff --git a/services/mediametrics/iface_statsd.cpp b/services/mediametrics/iface_statsd.cpp
index 16204de..b7c5296 100644
--- a/services/mediametrics/iface_statsd.cpp
+++ b/services/mediametrics/iface_statsd.cpp
@@ -27,7 +27,10 @@
 #include <pthread.h>
 #include <unistd.h>
 
+#include <map>
 #include <memory>
+#include <string>
+#include <vector>
 #include <string.h>
 #include <pwd.h>
 
@@ -47,31 +50,13 @@
 
 bool enabled_statsd = true;
 
-struct statsd_hooks {
-    const char *key;
-    bool (*handler)(const mediametrics::Item *);
-};
+using statsd_pusher = bool (*)(const mediametrics::Item *);
+using statsd_puller = bool (*)(const mediametrics::Item *, AStatsEventList *);
 
-// keep this sorted, so we can do binary searches
-static constexpr struct statsd_hooks statsd_handlers[] =
-{
-    { "audiopolicy", statsd_audiopolicy },
-    { "audiorecord", statsd_audiorecord },
-    { "audiothread", statsd_audiothread },
-    { "audiotrack", statsd_audiotrack },
-    { "codec", statsd_codec},
-    { "drm.vendor.Google.WidevineCDM", statsd_widevineCDM },
-    { "drmmanager", statsd_drmmanager },
-    { "extractor", statsd_extractor },
-    { "mediadrm", statsd_mediadrm },
-    { "mediaparser", statsd_mediaparser },
-    { "nuplayer", statsd_nuplayer },
-    { "nuplayer2", statsd_nuplayer },
-    { "recorder", statsd_recorder },
-};
-
-// give me a record, i'll look at the type and upload appropriately
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item) {
+namespace {
+template<typename Handler, typename... Args>
+bool dump2StatsdInternal(const std::map<std::string, Handler>& handlers,
+        const std::shared_ptr<const mediametrics::Item>& item, Args... args) {
     if (item == nullptr) return false;
 
     // get the key
@@ -82,12 +67,39 @@
         return false;
     }
 
-    for (const auto &statsd_handler : statsd_handlers) {
-        if (key == statsd_handler.key) {
-            return statsd_handler.handler(item.get());
-        }
+    if (handlers.count(key)) {
+        return (handlers.at(key))(item.get(), args...);
     }
     return false;
 }
+} // namespace
+
+// give me a record, I'll look at the type and upload appropriately
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item) {
+    static const std::map<std::string, statsd_pusher> statsd_pushers =
+    {
+        { "audiopolicy", statsd_audiopolicy },
+        { "audiorecord", statsd_audiorecord },
+        { "audiothread", statsd_audiothread },
+        { "audiotrack", statsd_audiotrack },
+        { "codec", statsd_codec},
+        { "drmmanager", statsd_drmmanager },
+        { "extractor", statsd_extractor },
+        { "mediadrm", statsd_mediadrm },
+        { "mediaparser", statsd_mediaparser },
+        { "nuplayer", statsd_nuplayer },
+        { "nuplayer2", statsd_nuplayer },
+        { "recorder", statsd_recorder },
+    };
+    return dump2StatsdInternal(statsd_pushers, item);
+}
+
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out) {
+    static const std::map<std::string, statsd_puller> statsd_pullers =
+    {
+        { "mediadrm", statsd_mediadrm_puller },
+    };
+    return dump2StatsdInternal(statsd_pullers, item, out);
+}
 
 } // namespace android
diff --git a/services/mediametrics/iface_statsd.h b/services/mediametrics/iface_statsd.h
index 9b49556..1b6c79a 100644
--- a/services/mediametrics/iface_statsd.h
+++ b/services/mediametrics/iface_statsd.h
@@ -14,7 +14,13 @@
  * limitations under the License.
  */
 
+#include <memory>
+#include <stats_event.h>
+
 namespace android {
+namespace mediametrics {
+class Item;
+}
 
 extern bool enabled_statsd;
 
@@ -30,7 +36,12 @@
 extern bool statsd_recorder(const mediametrics::Item *);
 
 extern bool statsd_mediadrm(const mediametrics::Item *);
-extern bool statsd_widevineCDM(const mediametrics::Item *);
 extern bool statsd_drmmanager(const mediametrics::Item *);
 
+// component specific pullers
+extern bool statsd_mediadrm_puller(const mediametrics::Item *, AStatsEventList *);
+
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item);
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out);
+
 } // namespace android
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index ac58929..071c549 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "statsd_drm"
 #include <utils/Log.h>
+#include <media/stagefright/foundation/base64.h>
 
 #include <stdint.h>
 #include <inttypes.h>
@@ -37,6 +38,7 @@
 
 #include <array>
 #include <string>
+#include <vector>
 
 namespace android {
 
@@ -54,12 +56,12 @@
     (void) item->getString("vendor", &vendor);
     std::string description;
     (void) item->getString("description", &description);
-    std::string serialized_metrics;
-    (void) item->getString("serialized_metrics", &serialized_metrics);
 
     if (enabled_statsd) {
-        android::util::BytesField bf_serialized(serialized_metrics.c_str(),
-                                                serialized_metrics.size());
+        // The serialized_metrics payload is no longer used; send a placeholder to keep
+        // the atom's field layout backward compatible.
+        const std::string kUnusedField("unused");
+        android::util::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
         android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
                                    timestamp, pkgName.c_str(), pkgVersionCode,
                                    mediaApexVersion,
@@ -67,34 +69,7 @@
                                    description.c_str(),
                                    bf_serialized);
     } else {
-        ALOGV("NOT sending: mediadrm private data (len=%zu)", serialized_metrics.size());
-    }
-
-    return true;
-}
-
-// widevineCDM
-bool statsd_widevineCDM(const mediametrics::Item *item)
-{
-    if (item == nullptr) return false;
-
-    const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
-    std::string pkgName = item->getPkgName();
-    int64_t pkgVersionCode = item->getPkgVersionCode();
-    int64_t mediaApexVersion = 0;
-
-    std::string serialized_metrics;
-    (void) item->getString("serialized_metrics", &serialized_metrics);
-
-    if (enabled_statsd) {
-        android::util::BytesField bf_serialized(serialized_metrics.c_str(),
-                                                serialized_metrics.size());
-        android::util::stats_write(android::util::MEDIAMETRICS_DRM_WIDEVINE_REPORTED,
-                                   timestamp, pkgName.c_str(), pkgVersionCode,
-                                   mediaApexVersion,
-                                   bf_serialized);
-    } else {
-        ALOGV("NOT sending: widevine private data (len=%zu)", serialized_metrics.size());
+        ALOGV("NOT sending: mediadrm data(%s, %s)", vendor.c_str(), description.c_str());
     }
 
     return true;
@@ -145,4 +120,65 @@
     return true;
 }
 
+namespace {
+std::vector<uint8_t> base64DecodeNoPad(std::string& str) {
+    if (str.empty()) {
+        return {};
+    }
+
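+    // The metrics strings arrive base64-encoded without padding; restore the '=' padding
+    // so the length is a multiple of 4 as decodeBase64 expects.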
+    switch (str.length() % 4) {
+    case 3: str += "="; break;
+    case 2: str += "=="; break;
+    case 1: str += "==="; break;
+    case 0: /* unchanged */ break;
+    }
+
+    std::vector<uint8_t> buf(str.length() / 4 * 3, 0);
+    size_t size = buf.size();
+    if (decodeBase64(buf.data(), &size, str.c_str()) && size <= buf.size()) {
+        buf.erase(buf.begin() + size, buf.end());
+        return buf;
+    }
+    return {};
+}
+} // namespace
+
+// |out| and its contents are memory-managed by statsd.
+bool statsd_mediadrm_puller(const mediametrics::Item* item, AStatsEventList* out)
+{
+    if (item == nullptr) {
+        return false;
+    }
+
+    if (!enabled_statsd) {
+        ALOGV("NOT pulling: mediadrm activity");
+        return true;
+    }
+
+    std::string serialized_metrics;
+    (void) item->getString("serialized_metrics", &serialized_metrics);
+    const auto framework_raw(base64DecodeNoPad(serialized_metrics));
+
+    std::string plugin_metrics;
+    (void) item->getString("plugin_metrics", &plugin_metrics);
+    const auto plugin_raw(base64DecodeNoPad(plugin_metrics));
+
+    std::string vendor;
+    (void) item->getString("vendor", &vendor);
+    std::string description;
+    (void) item->getString("description", &description);
+
+    // Memory for |event| is internally managed by statsd.
+    AStatsEvent* event = AStatsEventList_addStatsEvent(out);
+    AStatsEvent_setAtomId(event, android::util::MEDIA_DRM_ACTIVITY_INFO);
+    AStatsEvent_writeString(event, item->getPkgName().c_str());
+    AStatsEvent_writeInt64(event, item->getPkgVersionCode());
+    AStatsEvent_writeString(event, vendor.c_str());
+    AStatsEvent_writeString(event, description.c_str());
+    AStatsEvent_writeByteArray(event, framework_raw.data(), framework_raw.size());
+    AStatsEvent_writeByteArray(event, plugin_raw.data(), plugin_raw.size());
+    AStatsEvent_build(event);
+    return true;
+}
+
 } // namespace android
diff --git a/services/mediaresourcemanager/ResourceObserverService.cpp b/services/mediaresourcemanager/ResourceObserverService.cpp
index 9cc6fe4..4e97406 100644
--- a/services/mediaresourcemanager/ResourceObserverService.cpp
+++ b/services/mediaresourcemanager/ResourceObserverService.cpp
@@ -165,6 +165,10 @@
         return Status::fromServiceSpecificError(PERMISSION_DENIED);
     }
 
+    if (in_observer == nullptr) {
+        return Status::fromServiceSpecificError(BAD_VALUE);
+    }
+
     ::ndk::SpAIBinder binder = in_observer->asBinder();
 
     {
@@ -220,6 +224,10 @@
         return Status::fromServiceSpecificError(PERMISSION_DENIED);
     }
 
+    if (in_observer == nullptr) {
+        return Status::fromServiceSpecificError(BAD_VALUE);
+    }
+
     ::ndk::SpAIBinder binder = in_observer->asBinder();
 
     {
diff --git a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
index e3d3e78..acd9df1 100644
--- a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
@@ -182,6 +182,11 @@
     std::vector<MediaObservableFilter> filters1;
     Status status;
 
+    // Register with null observer should fail.
+    status = mObserverService->registerObserver(nullptr, filters1);
+    EXPECT_FALSE(status.isOk());
+    EXPECT_EQ(status.getServiceSpecificError(), BAD_VALUE);
+
     // Register with empty observables should fail.
     status = mObserverService->registerObserver(mTestObserver1, filters1);
     EXPECT_FALSE(status.isOk());