Merge "Fix potential null ptr after low memory" into sc-dev
diff --git a/camera/ndk/include/camera/NdkCameraWindowType.h b/camera/ndk/include/camera/NdkCameraWindowType.h
index df977da..0838fba 100644
--- a/camera/ndk/include/camera/NdkCameraWindowType.h
+++ b/camera/ndk/include/camera/NdkCameraWindowType.h
@@ -50,4 +50,6 @@
 typedef ANativeWindow ACameraWindowType;
 #endif
 
+/** @} */
+
 #endif //_NDK_CAMERA_WINDOW_TYPE_H
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 988cda9..ec0b878 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -211,7 +211,7 @@
     }
 
     auto allLogs(gLogBuf.getLogs());
-    LOG2BI("framework logs size %zu; plugin logs size %zu",
+    LOG2BD("framework logs size %zu; plugin logs size %zu",
            allLogs.size(), pluginLogs.size());
     std::copy(pluginLogs.begin(), pluginLogs.end(), std::back_inserter(allLogs));
     std::sort(allLogs.begin(), allLogs.end(),
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 6ac3510..089eb1c 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -207,6 +207,7 @@
     }
 
     infoMap.clear();
+    android::Mutex::Autolock lock(mPlayPolicyLock);
     for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
         infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
     }
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index aa9b59d..95f15ca 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,7 +262,7 @@
     void initProperties();
     void setPlayPolicy();
 
-    android::Mutex mPlayPolicyLock;
+    mutable android::Mutex mPlayPolicyLock;
     android::KeyedVector<String8, String8> mPlayPolicy;
     android::KeyedVector<String8, String8> mStringProperties;
     android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
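
The hunk above makes mPlayPolicyLock mutable so that const accessors can still take the lock while mPlayPolicy is read. A minimal standalone sketch of that pattern, using std::mutex instead of android::Mutex so it builds without Android headers (class and member names are illustrative only):

    #include <map>
    #include <mutex>
    #include <string>

    class PolicyStore {
    public:
        // Writers take the lock through the ordinary non-const path.
        void set(const std::string& key, const std::string& value) {
            std::lock_guard<std::mutex> lock(mLock);
            mPolicy[key] = value;
        }
        // Read-only accessor: without "mutable" on mLock this would not compile,
        // since lock()/unlock() are non-const operations on the member.
        size_t size() const {
            std::lock_guard<std::mutex> lock(mLock);
            return mPolicy.size();
        }
    private:
        mutable std::mutex mLock;  // mutable: const methods may still lock it
        std::map<std::string, std::string> mPolicy;
    };
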
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index e6e1f80..c49d5fe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -50,7 +50,7 @@
 
     relative_install_path: "hw",
 
-    cflags: ["-Wall", "-Werror"],
+    cflags: ["-Wall", "-Werror", "-Wthread-safety"],
 
     shared_libs: [
         "android.hardware.drm@1.0",
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index d278633..302dd39 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -37,6 +37,8 @@
     sp<IMemory> hidlMemory = mapMemory(base);
     ALOGE_IF(hidlMemory == nullptr, "mapMemory returns nullptr");
 
+    std::lock_guard<std::mutex> shared_buffer_lock(mSharedBufferLock);
+
     // allow mapMemory to return nullptr
     mSharedBufferMap[bufferId] = hidlMemory;
     return Void();
@@ -94,6 +96,7 @@
         return Void();
     }
 
+    std::unique_lock<std::mutex> shared_buffer_lock(mSharedBufferLock);
     if (mSharedBufferMap.find(source.bufferId) == mSharedBufferMap.end()) {
       _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
                "source decrypt buffer base not set");
@@ -142,12 +145,17 @@
 
     base = static_cast<uint8_t *>(static_cast<void *>(destBase->getPointer()));
 
-    if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
+    totalSize = 0;
+    if (__builtin_add_overflow(destBuffer.offset, destBuffer.size, &totalSize) ||
+        totalSize > destBase->getSize()) {
+        android_errorWriteLog(0x534e4554, "176444622");
         _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
         return Void();
     }
-    destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
+    destPtr = static_cast<void*>(base + destination.nonsecureMemory.offset);
 
+    // release mSharedBufferLock
+    shared_buffer_lock.unlock();
 
     // Calculate the output buffer size and determine if any subsamples are
     // encrypted.
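
The decrypt hunk above replaces the plain "offset + size > total" bounds check, which can wrap around, with an overflow-aware one. A self-contained sketch of that check; the helper name and sizes below are made up for illustration and are not the plugin's HIDL types:

    #include <cstdint>
    #include <cstdio>

    // Returns true when [offset, offset + size) fits inside a buffer of baseSize bytes.
    // __builtin_add_overflow (GCC/Clang) reports wrap-around instead of silently truncating.
    static bool rangeFits(uint64_t offset, uint64_t size, uint64_t baseSize) {
        uint64_t end = 0;
        if (__builtin_add_overflow(offset, size, &end)) {
            return false;  // offset + size wrapped past UINT64_MAX
        }
        return end <= baseSize;
    }

    int main() {
        printf("%d\n", rangeFits(16, 48, 64));              // 1: exactly fits
        printf("%d\n", rangeFits(UINT64_MAX - 8, 32, 64));  // 0: the naive check would wrap and pass
        return 0;
    }
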
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index a77759e..6f69110 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -220,6 +220,7 @@
         if (requestString.find(kOfflineLicense) != std::string::npos) {
             std::string emptyResponse;
             std::string keySetIdString(keySetId.begin(), keySetId.end());
+            Mutex::Autolock lock(mFileHandleLock);
             if (!mFileHandle.StoreLicense(keySetIdString,
                     DeviceFiles::kLicenseStateReleasing,
                     emptyResponse)) {
@@ -335,6 +336,7 @@
         }
         *keySetId = kKeySetIdPrefix + ByteArrayToHexString(
                 reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
+        Mutex::Autolock lock(mFileHandleLock);
         if (mFileHandle.LicenseExists(*keySetId)) {
             // collision, regenerate
             ALOGV("Retry generating KeySetId");
@@ -392,6 +394,7 @@
     if (status == Status::OK) {
         if (isOfflineLicense) {
             if (isRelease) {
+                Mutex::Autolock lock(mFileHandleLock);
                 mFileHandle.DeleteLicense(keySetId);
                 mSessionLibrary->destroySession(session);
             } else {
@@ -400,6 +403,7 @@
                     return Void();
                 }
 
+                Mutex::Autolock lock(mFileHandleLock);
                 bool ok = mFileHandle.StoreLicense(
                         keySetId,
                         DeviceFiles::kLicenseStateActive,
@@ -454,6 +458,7 @@
         DeviceFiles::LicenseState licenseState;
         std::string offlineLicense;
         Status status = Status::OK;
+        Mutex::Autolock lock(mFileHandleLock);
         if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
                 &licenseState, &offlineLicense)) {
             ALOGE("Failed to restore offline license");
@@ -576,7 +581,6 @@
 Return<void> DrmPlugin::queryKeyStatus(
         const hidl_vec<uint8_t>& sessionId,
         queryKeyStatus_cb _hidl_cb) {
-
     if (sessionId.size() == 0) {
         // Returns empty key status KeyValue pair
         _hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
@@ -586,12 +590,14 @@
     std::vector<KeyValue> infoMapVec;
     infoMapVec.clear();
 
+    mPlayPolicyLock.lock();
     KeyValue keyValuePair;
     for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
         keyValuePair.key = mPlayPolicy[i].key;
         keyValuePair.value = mPlayPolicy[i].value;
         infoMapVec.push_back(keyValuePair);
     }
+    mPlayPolicyLock.unlock();
     _hidl_cb(Status::OK, toHidlVec(infoMapVec));
     return Void();
 }
@@ -704,6 +710,8 @@
 }
 
 Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
+    Mutex::Autolock lock(mFileHandleLock);
+
     std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
     std::vector<KeySetId> keySetIds;
     if (mMockError != Status_V1_2::OK) {
@@ -724,6 +732,7 @@
         return toStatus_1_0(mMockError);
     }
     std::string licenseName(keySetId.begin(), keySetId.end());
+    Mutex::Autolock lock(mFileHandleLock);
     if (mFileHandle.DeleteLicense(licenseName)) {
         return Status::OK;
     }
@@ -732,6 +741,8 @@
 
 Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
         getOfflineLicenseState_cb _hidl_cb) {
+    Mutex::Autolock lock(mFileHandleLock);
+
     std::string licenseName(keySetId.begin(), keySetId.end());
     DeviceFiles::LicenseState state;
     std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index 051a968..32cf2dc 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,11 +24,13 @@
 }
 
 bool MemoryFileSystem::FileExists(const std::string& fileName) const {
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     return result != mMemoryFileSystem.end();
 }
 
 ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     if (result != mMemoryFileSystem.end()) {
         return static_cast<ssize_t>(result->second.getFileSize());
@@ -40,6 +42,7 @@
 
 std::vector<std::string> MemoryFileSystem::ListFiles() const {
     std::vector<std::string> list;
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     for (const auto& filename : mMemoryFileSystem) {
         list.push_back(filename.first);
     }
@@ -48,6 +51,7 @@
 
 size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
     std::string key = GetFileName(path);
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(key);
     if (result != mMemoryFileSystem.end()) {
         std::string serializedHashFile = result->second.getContent();
@@ -61,6 +65,7 @@
 
 size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
     std::string key = GetFileName(path);
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(key);
     if (result != mMemoryFileSystem.end()) {
         mMemoryFileSystem.erase(key);
@@ -70,6 +75,7 @@
 }
 
 bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     if (result != mMemoryFileSystem.end()) {
         mMemoryFileSystem.erase(result);
@@ -81,6 +87,7 @@
 }
 
 bool MemoryFileSystem::RemoveAllFiles() {
+    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     mMemoryFileSystem.clear();
     return mMemoryFileSystem.empty();
 }
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
index 8680f0c..23a64fa 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
@@ -20,6 +20,8 @@
 #include <android/hardware/drm/1.2/ICryptoPlugin.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 
+#include <mutex>
+
 #include "ClearKeyTypes.h"
 #include "Session.h"
 #include "Utils.h"
@@ -93,7 +95,7 @@
             const SharedBuffer& source,
             uint64_t offset,
             const DestinationBuffer& destination,
-            decrypt_1_2_cb _hidl_cb);
+            decrypt_1_2_cb _hidl_cb) NO_THREAD_SAFETY_ANALYSIS; // use unique_lock
 
     Return<void> setSharedBufferBase(const hidl_memory& base,
             uint32_t bufferId);
@@ -105,7 +107,8 @@
 private:
     CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoPlugin);
 
-    std::map<uint32_t, sp<IMemory> > mSharedBufferMap;
+    std::mutex mSharedBufferLock;
+    std::map<uint32_t, sp<IMemory>> mSharedBufferMap GUARDED_BY(mSharedBufferLock);
     sp<Session> mSession;
     Status mInitStatus;
 };
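
The -Wthread-safety flag added to Android.bp, together with the GUARDED_BY and NO_THREAD_SAFETY_ANALYSIS annotations above, enables Clang's static thread-safety analysis. A compilable sketch of how those annotations interact, with minimal stand-in macros instead of <android-base/thread_annotations.h> (the classes here are illustrative, not the plugin's):

    // Build with: clang++ -std=c++17 -Wthread-safety -fsyntax-only guarded_by_sketch.cpp
    #include <cstdint>
    #include <map>
    #include <mutex>

    // Minimal stand-ins for the macros provided by <android-base/thread_annotations.h>.
    #define CAPABILITY(x)             __attribute__((capability(x)))
    #define GUARDED_BY(x)             __attribute__((guarded_by(x)))
    #define ACQUIRE(...)              __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)              __attribute__((release_capability(__VA_ARGS__)))
    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    // A tiny annotated mutex so the analysis knows lock()/unlock() manage a capability.
    class CAPABILITY("mutex") Mutex {
    public:
        void lock() ACQUIRE() { mImpl.lock(); }
        void unlock() RELEASE() { mImpl.unlock(); }
    private:
        std::mutex mImpl;
    };

    class BufferMap {
    public:
        void set(uint32_t id, int fd) {
            mLock.lock();
            mBuffers[id] = fd;               // OK: mLock is held here
            mLock.unlock();
        }
        bool has(uint32_t id) {
            return mBuffers.count(id) != 0;  // -Wthread-safety: requires holding mLock
        }
        // Methods that hand the lock around manually can opt out of the analysis,
        // as decrypt_1_2 does above with its std::unique_lock.
        void clearUnchecked() NO_THREAD_SAFETY_ANALYSIS { mBuffers.clear(); }
    private:
        Mutex mLock;
        std::map<uint32_t, int> mBuffers GUARDED_BY(mLock);
    };
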
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 076beb8..894985b 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -416,7 +416,8 @@
         mMockError = Status_V1_2::OK;
     }
 
-    DeviceFiles mFileHandle;
+    DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
+    Mutex mFileHandleLock;
     Mutex mSecureStopLock;
 
     CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index bcd9fd6..6ac0e2c 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,7 +5,9 @@
 #ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
 #define CLEARKEY_MEMORY_FILE_SYSTEM_H_
 
+#include <android-base/thread_annotations.h>
 #include <map>
+#include <mutex>
 #include <string>
 
 #include "ClearKeyTypes.h"
@@ -49,10 +51,12 @@
     size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
 
  private:
+    mutable std::mutex mMemoryFileSystemLock;
+
     // License file name is made up of a unique keySetId, therefore,
     // the filename can be used as the key to locate licenses in the
     // memory file system.
-    std::map<std::string, MemoryFile> mMemoryFileSystem;
+    std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
 
     std::string GetFileName(const std::string& path);
 
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 0207311..f4a6e17 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -22,11 +22,11 @@
 
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
 #include <Codec2Mapper.h>
 #include <SimpleC2Interface.h>
 
 #include "C2SoftAvcDec.h"
-#include "ih264d.h"
 
 namespace android {
 
@@ -332,6 +332,14 @@
     free(mem);
 }
 
+static IV_COLOR_FORMAT_T GetIvColorFormat() {
+    static IV_COLOR_FORMAT_T sColorFormat =
+        (GetYuv420FlexibleLayout() == FLEX_LAYOUT_SEMIPLANAR_UV) ? IV_YUV_420SP_UV :
+        (GetYuv420FlexibleLayout() == FLEX_LAYOUT_SEMIPLANAR_VU) ? IV_YUV_420SP_VU :
+        IV_YUV_420P;
+    return sColorFormat;
+}
+
 C2SoftAvcDec::C2SoftAvcDec(
         const char *name,
         c2_node_id_t id,
@@ -340,7 +348,6 @@
       mIntf(intfImpl),
       mDecHandle(nullptr),
       mOutBufferFlush(nullptr),
-      mIvColorFormat(IV_YUV_420P),
       mOutputDelay(kDefaultOutputDelay),
       mWidth(320),
       mHeight(240),
@@ -391,12 +398,14 @@
     }
 
     while (true) {
-        ivd_video_decode_ip_t s_decode_ip;
-        ivd_video_decode_op_t s_decode_op;
+        ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+        ih264d_video_decode_op_t s_h264d_decode_op = {};
+        ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+        ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
 
-        setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, nullptr, 0, 0, 0);
-        (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
-        if (0 == s_decode_op.u4_output_present) {
+        setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, nullptr, 0, 0, 0);
+        (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
+        if (0 == ps_decode_op->u4_output_present) {
             resetPlugin();
             break;
         }
@@ -411,13 +420,19 @@
 }
 
 status_t C2SoftAvcDec::createDecoder() {
-    ivdext_create_ip_t s_create_ip;
-    ivdext_create_op_t s_create_op;
+    ivdext_create_ip_t s_create_ip = {};
+    ivdext_create_op_t s_create_op = {};
 
     s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
     s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
     s_create_ip.s_ivd_create_ip_t.u4_share_disp_buf = 0;
-    s_create_ip.s_ivd_create_ip_t.e_output_format = mIvColorFormat;
+    s_create_ip.s_ivd_create_ip_t.e_output_format = GetIvColorFormat();
+    switch (s_create_ip.s_ivd_create_ip_t.e_output_format) {
+        case IV_YUV_420P:       ALOGD("Flex Planar");           break;
+        case IV_YUV_420SP_UV:   ALOGD("Flex Semi-planar UV");   break;
+        case IV_YUV_420SP_VU:   ALOGD("Flex Semi-planar VU");   break;
+        default:                ALOGD("Unknown");               break;
+    }
     s_create_ip.s_ivd_create_ip_t.pf_aligned_alloc = ivd_aligned_malloc;
     s_create_ip.s_ivd_create_ip_t.pf_aligned_free = ivd_aligned_free;
     s_create_ip.s_ivd_create_ip_t.pv_mem_ctxt = nullptr;
@@ -438,8 +453,8 @@
 }
 
 status_t C2SoftAvcDec::setNumCores() {
-    ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip;
-    ivdext_ctl_set_num_cores_op_t s_set_num_cores_op;
+    ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip = {};
+    ivdext_ctl_set_num_cores_op_t s_set_num_cores_op = {};
 
     s_set_num_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
     s_set_num_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -458,22 +473,26 @@
 }
 
 status_t C2SoftAvcDec::setParams(size_t stride, IVD_VIDEO_DECODE_MODE_T dec_mode) {
-    ivd_ctl_set_config_ip_t s_set_dyn_params_ip;
-    ivd_ctl_set_config_op_t s_set_dyn_params_op;
+    ih264d_ctl_set_config_ip_t s_h264d_set_dyn_params_ip = {};
+    ih264d_ctl_set_config_op_t s_h264d_set_dyn_params_op = {};
+    ivd_ctl_set_config_ip_t *ps_set_dyn_params_ip =
+        &s_h264d_set_dyn_params_ip.s_ivd_ctl_set_config_ip_t;
+    ivd_ctl_set_config_op_t *ps_set_dyn_params_op =
+        &s_h264d_set_dyn_params_op.s_ivd_ctl_set_config_op_t;
 
-    s_set_dyn_params_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
-    s_set_dyn_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
-    s_set_dyn_params_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
-    s_set_dyn_params_ip.u4_disp_wd = (UWORD32) stride;
-    s_set_dyn_params_ip.e_frm_skip_mode = IVD_SKIP_NONE;
-    s_set_dyn_params_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
-    s_set_dyn_params_ip.e_vid_dec_mode = dec_mode;
-    s_set_dyn_params_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+    ps_set_dyn_params_ip->u4_size = sizeof(ih264d_ctl_set_config_ip_t);
+    ps_set_dyn_params_ip->e_cmd = IVD_CMD_VIDEO_CTL;
+    ps_set_dyn_params_ip->e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+    ps_set_dyn_params_ip->u4_disp_wd = (UWORD32) stride;
+    ps_set_dyn_params_ip->e_frm_skip_mode = IVD_SKIP_NONE;
+    ps_set_dyn_params_ip->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+    ps_set_dyn_params_ip->e_vid_dec_mode = dec_mode;
+    ps_set_dyn_params_op->u4_size = sizeof(ih264d_ctl_set_config_op_t);
     IV_API_CALL_STATUS_T status = ivdec_api_function(mDecHandle,
-                                                     &s_set_dyn_params_ip,
-                                                     &s_set_dyn_params_op);
+                                                     &s_h264d_set_dyn_params_ip,
+                                                     &s_h264d_set_dyn_params_op);
     if (status != IV_SUCCESS) {
-        ALOGE("error in %s: 0x%x", __func__, s_set_dyn_params_op.u4_error_code);
+        ALOGE("error in %s: 0x%x", __func__, ps_set_dyn_params_op->u4_error_code);
         return UNKNOWN_ERROR;
     }
 
@@ -481,8 +500,8 @@
 }
 
 void C2SoftAvcDec::getVersion() {
-    ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip;
-    ivd_ctl_getversioninfo_op_t s_get_versioninfo_op;
+    ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip = {};
+    ivd_ctl_getversioninfo_op_t s_get_versioninfo_op = {};
     UWORD8 au1_buf[512];
 
     s_get_versioninfo_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
@@ -538,7 +557,7 @@
         if (OK != setParams(mStride, IVD_DECODE_FRAME)) return false;
     }
 
-    ps_decode_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+    ps_decode_ip->u4_size = sizeof(ih264d_video_decode_ip_t);
     ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
     if (inBuffer) {
         ps_decode_ip->u4_ts = tsMarker;
@@ -550,8 +569,12 @@
         ps_decode_ip->u4_num_Bytes = 0;
     }
     ps_decode_ip->s_out_buffer.u4_min_out_buf_size[0] = lumaSize;
-    ps_decode_ip->s_out_buffer.u4_min_out_buf_size[1] = chromaSize;
-    ps_decode_ip->s_out_buffer.u4_min_out_buf_size[2] = chromaSize;
+    if (GetIvColorFormat() == IV_YUV_420P) {
+        ps_decode_ip->s_out_buffer.u4_min_out_buf_size[1] = chromaSize;
+        ps_decode_ip->s_out_buffer.u4_min_out_buf_size[2] = chromaSize;
+    } else {
+        ps_decode_ip->s_out_buffer.u4_min_out_buf_size[1] = chromaSize * 2;
+    }
     if (outBuffer) {
         if (outBuffer->height() < displayHeight) {
             ALOGE("Output buffer too small: provided (%dx%d) required (%ux%u)",
@@ -560,21 +583,31 @@
         }
         ps_decode_ip->s_out_buffer.pu1_bufs[0] = outBuffer->data()[C2PlanarLayout::PLANE_Y];
         ps_decode_ip->s_out_buffer.pu1_bufs[1] = outBuffer->data()[C2PlanarLayout::PLANE_U];
-        ps_decode_ip->s_out_buffer.pu1_bufs[2] = outBuffer->data()[C2PlanarLayout::PLANE_V];
+        if (GetIvColorFormat() == IV_YUV_420P) {
+            ps_decode_ip->s_out_buffer.pu1_bufs[2] = outBuffer->data()[C2PlanarLayout::PLANE_V];
+        } else if (GetIvColorFormat() == IV_YUV_420SP_VU) {
+            ps_decode_ip->s_out_buffer.pu1_bufs[1] = outBuffer->data()[C2PlanarLayout::PLANE_V];
+        }
     } else {
         ps_decode_ip->s_out_buffer.pu1_bufs[0] = mOutBufferFlush;
         ps_decode_ip->s_out_buffer.pu1_bufs[1] = mOutBufferFlush + lumaSize;
-        ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
+        if (GetIvColorFormat() == IV_YUV_420P) {
+            ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
+        }
     }
-    ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
-    ps_decode_op->u4_size = sizeof(ivd_video_decode_op_t);
+    if (GetIvColorFormat() == IV_YUV_420P) {
+        ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
+    } else {
+        ps_decode_ip->s_out_buffer.u4_num_bufs = 2;
+    }
+    ps_decode_op->u4_size = sizeof(ih264d_video_decode_op_t);
 
     return true;
 }
 
 bool C2SoftAvcDec::getVuiParams() {
-    ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip;
-    ivdext_ctl_get_vui_params_op_t s_get_vui_params_op;
+    ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip = {};
+    ivdext_ctl_get_vui_params_op_t s_get_vui_params_op = {};
 
     s_get_vui_params_ip.u4_size = sizeof(ivdext_ctl_get_vui_params_ip_t);
     s_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -622,8 +655,8 @@
 }
 
 status_t C2SoftAvcDec::setFlushMode() {
-    ivd_ctl_flush_ip_t s_set_flush_ip;
-    ivd_ctl_flush_op_t s_set_flush_op;
+    ivd_ctl_flush_ip_t s_set_flush_ip = {};
+    ivd_ctl_flush_op_t s_set_flush_op = {};
 
     s_set_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
     s_set_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -641,8 +674,8 @@
 }
 
 status_t C2SoftAvcDec::resetDecoder() {
-    ivd_ctl_reset_ip_t s_reset_ip;
-    ivd_ctl_reset_op_t s_reset_op;
+    ivd_ctl_reset_ip_t s_reset_ip = {};
+    ivd_ctl_reset_op_t s_reset_op = {};
 
     s_reset_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
     s_reset_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -671,8 +704,8 @@
 
 status_t C2SoftAvcDec::deleteDecoder() {
     if (mDecHandle) {
-        ivdext_delete_ip_t s_delete_ip;
-        ivdext_delete_op_t s_delete_op;
+        ivdext_delete_ip_t s_delete_ip = {};
+        ivdext_delete_op_t s_delete_op = {};
 
         s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
         s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
@@ -776,7 +809,7 @@
         mOutBlock.reset();
     }
     if (!mOutBlock) {
-        uint32_t format = HAL_PIXEL_FORMAT_YV12;
+        uint32_t format = HAL_PIXEL_FORMAT_YCBCR_420_888;
         C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
         c2_status_t err =
             pool->fetchGraphicBlock(ALIGN32(mWidth), mHeight, format, usage, &mOutBlock);
@@ -792,8 +825,6 @@
 }
 
 // TODO: can overall error checking be improved?
-// TODO: allow configuration of color format and usage for graphic buffers instead
-//       of hard coding them to HAL_PIXEL_FORMAT_YV12
 // TODO: pass coloraspects information to surface
 // TODO: test support for dynamic change in resolution
 // TODO: verify if the decoder sent back all frames
@@ -837,8 +868,10 @@
             return;
         }
 
-        ivd_video_decode_ip_t s_decode_ip;
-        ivd_video_decode_op_t s_decode_op;
+        ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+        ih264d_video_decode_op_t s_h264d_decode_op = {};
+        ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+        ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
         {
             C2GraphicView wView = mOutBlock->map().get();
             if (wView.error()) {
@@ -846,7 +879,7 @@
                 work->result = wView.error();
                 return;
             }
-            if (!setDecodeArgs(&s_decode_ip, &s_decode_op, &rView, &wView,
+            if (!setDecodeArgs(ps_decode_ip, ps_decode_op, &rView, &wView,
                                inOffset + inPos, inSize - inPos, workIndex)) {
                 mSignalledError = true;
                 work->workletsProcessed = 1u;
@@ -862,26 +895,27 @@
             WORD32 delay;
             GETTIME(&mTimeStart, nullptr);
             TIME_DIFF(mTimeEnd, mTimeStart, delay);
-            (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+            (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
             WORD32 decodeTime;
             GETTIME(&mTimeEnd, nullptr);
             TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
             ALOGV("decodeTime=%6d delay=%6d numBytes=%6d", decodeTime, delay,
-                  s_decode_op.u4_num_bytes_consumed);
+                  ps_decode_op->u4_num_bytes_consumed);
         }
-        if (IVD_MEM_ALLOC_FAILED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+        if (IVD_MEM_ALLOC_FAILED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
             ALOGE("allocation failure in decoder");
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
             return;
-        } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+        } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED ==
+                (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
             ALOGE("unsupported resolution : %dx%d", mWidth, mHeight);
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
             return;
-        } else if (IVD_RES_CHANGED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+        } else if (IVD_RES_CHANGED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
             ALOGV("resolution changed");
             drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
             resetDecoder();
@@ -890,16 +924,16 @@
 
             /* Decode header and get new dimensions */
             setParams(mStride, IVD_DECODE_HEADER);
-            (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
-        } else if (IS_IVD_FATAL_ERROR(s_decode_op.u4_error_code)) {
-            ALOGE("Fatal error in decoder 0x%x", s_decode_op.u4_error_code);
+            (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+        } else if (IS_IVD_FATAL_ERROR(ps_decode_op->u4_error_code)) {
+            ALOGE("Fatal error in decoder 0x%x", ps_decode_op->u4_error_code);
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
             return;
         }
-        if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
-            mOutputDelay = s_decode_op.i4_reorder_depth;
+        if (ps_decode_op->i4_reorder_depth >= 0 && mOutputDelay != ps_decode_op->i4_reorder_depth) {
+            mOutputDelay = ps_decode_op->i4_reorder_depth;
             ALOGV("New Output delay %d ", mOutputDelay);
 
             C2PortActualDelayTuning::output outputDelay(mOutputDelay);
@@ -917,16 +951,16 @@
                 return;
             }
         }
-        if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
+        if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
             if (mHeaderDecoded == false) {
                 mHeaderDecoded = true;
-                mStride = ALIGN32(s_decode_op.u4_pic_wd);
+                mStride = ALIGN32(ps_decode_op->u4_pic_wd);
                 setParams(mStride, IVD_DECODE_FRAME);
             }
-            if (s_decode_op.u4_pic_wd != mWidth || s_decode_op.u4_pic_ht != mHeight) {
-                mWidth = s_decode_op.u4_pic_wd;
-                mHeight = s_decode_op.u4_pic_ht;
-                CHECK_EQ(0u, s_decode_op.u4_output_present);
+            if (ps_decode_op->u4_pic_wd != mWidth || ps_decode_op->u4_pic_ht != mHeight) {
+                mWidth = ps_decode_op->u4_pic_wd;
+                mHeight = ps_decode_op->u4_pic_ht;
+                CHECK_EQ(0u, ps_decode_op->u4_output_present);
 
                 C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
                 std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -945,11 +979,11 @@
             }
         }
         (void)getVuiParams();
-        hasPicture |= (1 == s_decode_op.u4_frame_decoded_flag);
-        if (s_decode_op.u4_output_present) {
-            finishWork(s_decode_op.u4_ts, work);
+        hasPicture |= (1 == ps_decode_op->u4_frame_decoded_flag);
+        if (ps_decode_op->u4_output_present) {
+            finishWork(ps_decode_op->u4_ts, work);
         }
-        inPos += s_decode_op.u4_num_bytes_consumed;
+        inPos += ps_decode_op->u4_num_bytes_consumed;
     }
     if (eos) {
         drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -987,16 +1021,18 @@
             ALOGE("graphic view map failed %d", wView.error());
             return C2_CORRUPTED;
         }
-        ivd_video_decode_ip_t s_decode_ip;
-        ivd_video_decode_op_t s_decode_op;
-        if (!setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, &wView, 0, 0, 0)) {
+        ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+        ih264d_video_decode_op_t s_h264d_decode_op = {};
+        ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+        ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
+        if (!setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, &wView, 0, 0, 0)) {
             mSignalledError = true;
             work->workletsProcessed = 1u;
             return C2_CORRUPTED;
         }
-        (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
-        if (s_decode_op.u4_output_present) {
-            finishWork(s_decode_op.u4_ts, work);
+        (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
+        if (ps_decode_op->u4_output_present) {
+            finishWork(ps_decode_op->u4_ts, work);
         } else {
             fillEmptyWork(work);
             break;
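
One recurring change in the decoder above is value-initializing the ivd_*_ip_t/op_t call structures with "= {}" before individual fields are filled in. A small sketch of why that matters for versioned in/out structs; the struct and function below are invented for illustration and are not part of libavcdec:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical versioned control struct in the style of the ivd_*_ip_t types:
    // the callee trusts u4_size and reads every field it knows about.
    struct ctl_ip_t {
        uint32_t u4_size;
        uint32_t e_cmd;
        uint32_t u4_flags;  // newer field: stale stack garbage here changes behavior
    };

    static int api_function(const ctl_ip_t *ip) {
        if (ip->u4_size < sizeof(ctl_ip_t)) return -1;
        return ip->u4_flags ? 1 : 0;  // reads a field the caller may not have set
    }

    int main() {
        ctl_ip_t s_ip = {};  // value-initialization: every field starts at zero
        s_ip.u4_size = sizeof(s_ip);
        s_ip.e_cmd = 1;
        // u4_flags is deliberately left untouched; with "= {}" it is a well-defined 0,
        // without it api_function() would read indeterminate stack memory.
        printf("%d\n", api_function(&s_ip));
        return 0;
    }
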
diff --git a/media/codec2/components/avc/C2SoftAvcDec.h b/media/codec2/components/avc/C2SoftAvcDec.h
index bd84de0..ed99ad1 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.h
+++ b/media/codec2/components/avc/C2SoftAvcDec.h
@@ -25,8 +25,7 @@
 #include <SimpleC2Component.h>
 
 #include "ih264_typedefs.h"
-#include "iv.h"
-#include "ivd.h"
+#include "ih264d.h"
 
 namespace android {
 
@@ -156,7 +155,6 @@
     uint8_t *mOutBufferFlush;
 
     size_t mNumCores;
-    IV_COLOR_FORMAT_T mIvColorFormat;
     uint32_t mOutputDelay;
     uint32_t mWidth;
     uint32_t mHeight;
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index bf9e5ff..fc5b75d 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -454,11 +454,19 @@
 
 }  // namespace
 
+static IV_COLOR_FORMAT_T GetIvColorFormat() {
+    static IV_COLOR_FORMAT_T sColorFormat =
+        (GetYuv420FlexibleLayout() == FLEX_LAYOUT_SEMIPLANAR_UV) ? IV_YUV_420SP_UV :
+        (GetYuv420FlexibleLayout() == FLEX_LAYOUT_SEMIPLANAR_VU) ? IV_YUV_420SP_VU :
+        IV_YUV_420P;
+    return sColorFormat;
+}
+
 C2SoftAvcEnc::C2SoftAvcEnc(
         const char *name, c2_node_id_t id, const std::shared_ptr<IntfImpl> &intfImpl)
     : SimpleC2Component(std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
       mIntf(intfImpl),
-      mIvVideoColorFormat(IV_YUV_420P),
+      mIvVideoColorFormat(GetIvColorFormat()),
       mAVCEncProfile(IV_PROFILE_BASE),
       mAVCEncLevel(41),
       mStarted(false),
@@ -1026,8 +1034,7 @@
     // Assume worst case output buffer size to be equal to number of bytes in input
     mOutBufferSize = std::max(width * height * 3 / 2, kMinOutBufferSize);
 
-    // TODO
-    mIvVideoColorFormat = IV_YUV_420P;
+    mIvVideoColorFormat = GetIvColorFormat();
 
     ALOGD("Params width %d height %d level %d colorFormat %d bframes %d", width,
             height, mAVCEncLevel, mIvVideoColorFormat, mBframes);
@@ -1082,29 +1089,31 @@
 
     /* Getting MemRecords Attributes */
     {
-        iv_fill_mem_rec_ip_t s_fill_mem_rec_ip;
-        iv_fill_mem_rec_op_t s_fill_mem_rec_op;
+        ih264e_fill_mem_rec_ip_t s_ih264e_mem_rec_ip = {};
+        ih264e_fill_mem_rec_op_t s_ih264e_mem_rec_op = {};
+        iv_fill_mem_rec_ip_t *ps_fill_mem_rec_ip = &s_ih264e_mem_rec_ip.s_ive_ip;
+        iv_fill_mem_rec_op_t *ps_fill_mem_rec_op = &s_ih264e_mem_rec_op.s_ive_op;
 
-        s_fill_mem_rec_ip.u4_size = sizeof(iv_fill_mem_rec_ip_t);
-        s_fill_mem_rec_op.u4_size = sizeof(iv_fill_mem_rec_op_t);
+        ps_fill_mem_rec_ip->u4_size = sizeof(ih264e_fill_mem_rec_ip_t);
+        ps_fill_mem_rec_op->u4_size = sizeof(ih264e_fill_mem_rec_op_t);
 
-        s_fill_mem_rec_ip.e_cmd = IV_CMD_FILL_NUM_MEM_REC;
-        s_fill_mem_rec_ip.ps_mem_rec = mMemRecords;
-        s_fill_mem_rec_ip.u4_num_mem_rec = mNumMemRecords;
-        s_fill_mem_rec_ip.u4_max_wd = width;
-        s_fill_mem_rec_ip.u4_max_ht = height;
-        s_fill_mem_rec_ip.u4_max_level = mAVCEncLevel;
-        s_fill_mem_rec_ip.e_color_format = DEFAULT_INP_COLOR_FORMAT;
-        s_fill_mem_rec_ip.u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
-        s_fill_mem_rec_ip.u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
-        s_fill_mem_rec_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
-        s_fill_mem_rec_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
+        ps_fill_mem_rec_ip->e_cmd = IV_CMD_FILL_NUM_MEM_REC;
+        ps_fill_mem_rec_ip->ps_mem_rec = mMemRecords;
+        ps_fill_mem_rec_ip->u4_num_mem_rec = mNumMemRecords;
+        ps_fill_mem_rec_ip->u4_max_wd = width;
+        ps_fill_mem_rec_ip->u4_max_ht = height;
+        ps_fill_mem_rec_ip->u4_max_level = mAVCEncLevel;
+        ps_fill_mem_rec_ip->e_color_format = DEFAULT_INP_COLOR_FORMAT;
+        ps_fill_mem_rec_ip->u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
+        ps_fill_mem_rec_ip->u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
+        ps_fill_mem_rec_ip->u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
+        ps_fill_mem_rec_ip->u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
 
-        status = ive_api_function(nullptr, &s_fill_mem_rec_ip, &s_fill_mem_rec_op);
+        status = ive_api_function(nullptr, &s_ih264e_mem_rec_ip, &s_ih264e_mem_rec_op);
 
         if (status != IV_SUCCESS) {
             ALOGE("Fill memory records failed = 0x%x\n",
-                    s_fill_mem_rec_op.u4_error_code);
+                    ps_fill_mem_rec_op->u4_error_code);
             return C2_CORRUPTED;
         }
     }
@@ -1133,48 +1142,51 @@
 
     /* Codec Instance Creation */
     {
-        ive_init_ip_t s_init_ip;
-        ive_init_op_t s_init_op;
+        ih264e_init_ip_t s_enc_ip = {};
+        ih264e_init_op_t s_enc_op = {};
+
+        ive_init_ip_t *ps_init_ip = &s_enc_ip.s_ive_ip;
+        ive_init_op_t *ps_init_op = &s_enc_op.s_ive_op;
 
         mCodecCtx = (iv_obj_t *)mMemRecords[0].pv_base;
         mCodecCtx->u4_size = sizeof(iv_obj_t);
         mCodecCtx->pv_fxns = (void *)ive_api_function;
 
-        s_init_ip.u4_size = sizeof(ive_init_ip_t);
-        s_init_op.u4_size = sizeof(ive_init_op_t);
+        ps_init_ip->u4_size = sizeof(ih264e_init_ip_t);
+        ps_init_op->u4_size = sizeof(ih264e_init_op_t);
 
-        s_init_ip.e_cmd = IV_CMD_INIT;
-        s_init_ip.u4_num_mem_rec = mNumMemRecords;
-        s_init_ip.ps_mem_rec = mMemRecords;
-        s_init_ip.u4_max_wd = width;
-        s_init_ip.u4_max_ht = height;
-        s_init_ip.u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
-        s_init_ip.u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
-        s_init_ip.u4_max_level = mAVCEncLevel;
-        s_init_ip.e_inp_color_fmt = mIvVideoColorFormat;
+        ps_init_ip->e_cmd = IV_CMD_INIT;
+        ps_init_ip->u4_num_mem_rec = mNumMemRecords;
+        ps_init_ip->ps_mem_rec = mMemRecords;
+        ps_init_ip->u4_max_wd = width;
+        ps_init_ip->u4_max_ht = height;
+        ps_init_ip->u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
+        ps_init_ip->u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
+        ps_init_ip->u4_max_level = mAVCEncLevel;
+        ps_init_ip->e_inp_color_fmt = mIvVideoColorFormat;
 
         if (mReconEnable || mPSNREnable) {
-            s_init_ip.u4_enable_recon = 1;
+            ps_init_ip->u4_enable_recon = 1;
         } else {
-            s_init_ip.u4_enable_recon = 0;
+            ps_init_ip->u4_enable_recon = 0;
         }
-        s_init_ip.e_recon_color_fmt = DEFAULT_RECON_COLOR_FORMAT;
-        s_init_ip.e_rc_mode = DEFAULT_RC_MODE;
-        s_init_ip.u4_max_framerate = DEFAULT_MAX_FRAMERATE;
-        s_init_ip.u4_max_bitrate = DEFAULT_MAX_BITRATE;
-        s_init_ip.u4_num_bframes = mBframes;
-        s_init_ip.e_content_type = IV_PROGRESSIVE;
-        s_init_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
-        s_init_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
-        s_init_ip.e_slice_mode = mSliceMode;
-        s_init_ip.u4_slice_param = mSliceParam;
-        s_init_ip.e_arch = mArch;
-        s_init_ip.e_soc = DEFAULT_SOC;
+        ps_init_ip->e_recon_color_fmt = DEFAULT_RECON_COLOR_FORMAT;
+        ps_init_ip->e_rc_mode = DEFAULT_RC_MODE;
+        ps_init_ip->u4_max_framerate = DEFAULT_MAX_FRAMERATE;
+        ps_init_ip->u4_max_bitrate = DEFAULT_MAX_BITRATE;
+        ps_init_ip->u4_num_bframes = mBframes;
+        ps_init_ip->e_content_type = IV_PROGRESSIVE;
+        ps_init_ip->u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
+        ps_init_ip->u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
+        ps_init_ip->e_slice_mode = mSliceMode;
+        ps_init_ip->u4_slice_param = mSliceParam;
+        ps_init_ip->e_arch = mArch;
+        ps_init_ip->e_soc = DEFAULT_SOC;
 
-        status = ive_api_function(mCodecCtx, &s_init_ip, &s_init_op);
+        status = ive_api_function(mCodecCtx, &s_enc_ip, &s_enc_op);
 
         if (status != IV_SUCCESS) {
-            ALOGE("Init encoder failed = 0x%x\n", s_init_op.u4_error_code);
+            ALOGE("Init encoder failed = 0x%x\n", ps_init_op->u4_error_code);
             return C2_CORRUPTED;
         }
     }
@@ -1320,7 +1332,6 @@
               mSize->width, input->height(), mSize->height);
         return C2_BAD_VALUE;
     }
-    ALOGV("width = %d, height = %d", input->width(), input->height());
     const C2PlanarLayout &layout = input->layout();
     uint8_t *yPlane = const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_Y]);
     uint8_t *uPlane = const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_U]);
@@ -1357,7 +1368,8 @@
                 return C2_BAD_VALUE;
             }
 
-            if (layout.planes[layout.PLANE_Y].colInc == 1
+            if (mIvVideoColorFormat == IV_YUV_420P
+                    && layout.planes[layout.PLANE_Y].colInc == 1
                     && layout.planes[layout.PLANE_U].colInc == 1
                     && layout.planes[layout.PLANE_V].colInc == 1
                     && uStride == vStride
@@ -1365,21 +1377,61 @@
                 // I420 compatible - already set up above
                 break;
             }
+            if (mIvVideoColorFormat == IV_YUV_420SP_UV
+                    && layout.planes[layout.PLANE_Y].colInc == 1
+                    && layout.planes[layout.PLANE_U].colInc == 2
+                    && layout.planes[layout.PLANE_V].colInc == 2
+                    && uStride == vStride
+                    && yStride == vStride
+                    && uPlane + 1 == vPlane) {
+                // NV12 compatible - already set up above
+                break;
+            }
+            if (mIvVideoColorFormat == IV_YUV_420SP_VU
+                    && layout.planes[layout.PLANE_Y].colInc == 1
+                    && layout.planes[layout.PLANE_U].colInc == 2
+                    && layout.planes[layout.PLANE_V].colInc == 2
+                    && uStride == vStride
+                    && yStride == vStride
+                    && uPlane == vPlane + 1) {
+                // NV21 compatible - already set up above
+                break;
+            }
 
             // copy to I420
             yStride = width;
             uStride = vStride = yStride / 2;
             MemoryBlock conversionBuffer = mConversionBuffers.fetch(yPlaneSize * 3 / 2);
             mConversionBuffersInUse.emplace(conversionBuffer.data(), conversionBuffer);
-            MediaImage2 img = CreateYUV420PlanarMediaImage2(width, height, yStride, height);
+            MediaImage2 img;
+            switch (mIvVideoColorFormat) {
+                case IV_YUV_420P:
+                    img = CreateYUV420PlanarMediaImage2(width, height, yStride, height);
+                    yPlane = conversionBuffer.data();
+                    uPlane = yPlane + yPlaneSize;
+                    vPlane = uPlane + yPlaneSize / 4;
+                    break;
+                case IV_YUV_420SP_VU:
+                    img = CreateYUV420SemiPlanarMediaImage2(width, height, yStride, height);
+                    img.mPlane[MediaImage2::U].mOffset++;
+                    img.mPlane[MediaImage2::V].mOffset--;
+                    yPlane = conversionBuffer.data();
+                    vPlane = yPlane + yPlaneSize;
+                    uPlane = vPlane + 1;
+                    break;
+                case IV_YUV_420SP_UV:
+                default:
+                    img = CreateYUV420SemiPlanarMediaImage2(width, height, yStride, height);
+                    yPlane = conversionBuffer.data();
+                    uPlane = yPlane + yPlaneSize;
+                    vPlane = uPlane + 1;
+                    break;
+            }
             status_t err = ImageCopy(conversionBuffer.data(), &img, *input);
             if (err != OK) {
                 ALOGE("Buffer conversion failed: %d", err);
                 return C2_BAD_VALUE;
             }
-            yPlane = conversionBuffer.data();
-            uPlane = yPlane + yPlaneSize;
-            vPlane = uPlane + yPlaneSize / 4;
             break;
 
         }
@@ -1425,15 +1477,17 @@
             break;
         }
 
-        case IV_YUV_420SP_UV:
         case IV_YUV_420SP_VU:
+            uPlane = vPlane;
+            [[fallthrough]];
+        case IV_YUV_420SP_UV:
         default:
         {
             ps_inp_raw_buf->apv_bufs[0] = yPlane;
             ps_inp_raw_buf->apv_bufs[1] = uPlane;
 
             ps_inp_raw_buf->au4_wd[0] = mSize->width;
-            ps_inp_raw_buf->au4_wd[1] = mSize->width;
+            ps_inp_raw_buf->au4_wd[1] = mSize->width / 2;
 
             ps_inp_raw_buf->au4_ht[0] = mSize->height;
             ps_inp_raw_buf->au4_ht[1] = mSize->height / 2;
@@ -1502,15 +1556,17 @@
     }
     // while (!mSawOutputEOS && !outQueue.empty()) {
     c2_status_t error;
-    ive_video_encode_ip_t s_encode_ip;
-    ive_video_encode_op_t s_encode_op;
-    memset(&s_encode_op, 0, sizeof(s_encode_op));
+    ih264e_video_encode_ip_t s_video_encode_ip = {};
+    ih264e_video_encode_op_t s_video_encode_op = {};
+    ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
+    ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
+    memset(ps_encode_op, 0, sizeof(*ps_encode_op));
 
     if (!mSpsPpsHeaderReceived) {
         constexpr uint32_t kHeaderLength = MIN_STREAM_SIZE;
         uint8_t header[kHeaderLength];
         error = setEncodeArgs(
-                &s_encode_ip, &s_encode_op, nullptr, header, kHeaderLength, workIndex);
+                ps_encode_ip, ps_encode_op, nullptr, header, kHeaderLength, workIndex);
         if (error != C2_OK) {
             ALOGE("setEncodeArgs failed: %d", error);
             mSignalledError = true;
@@ -1518,22 +1574,22 @@
             work->workletsProcessed = 1u;
             return;
         }
-        status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+        status = ive_api_function(mCodecCtx, ps_encode_ip, ps_encode_op);
 
         if (IV_SUCCESS != status) {
             ALOGE("Encode header failed = 0x%x\n",
-                    s_encode_op.u4_error_code);
+                    ps_encode_op->u4_error_code);
             work->workletsProcessed = 1u;
             return;
         } else {
             ALOGV("Bytes Generated in header %d\n",
-                    s_encode_op.s_out_buf.u4_bytes);
+                    ps_encode_op->s_out_buf.u4_bytes);
         }
 
         mSpsPpsHeaderReceived = true;
 
         std::unique_ptr<C2StreamInitDataInfo::output> csd =
-            C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+            C2StreamInitDataInfo::output::AllocUnique(ps_encode_op->s_out_buf.u4_bytes, 0u);
         if (!csd) {
             ALOGE("CSD allocation failed");
             mSignalledError = true;
@@ -1541,7 +1597,7 @@
             work->workletsProcessed = 1u;
             return;
         }
-        memcpy(csd->m.value, header, s_encode_op.s_out_buf.u4_bytes);
+        memcpy(csd->m.value, header, ps_encode_op->s_out_buf.u4_bytes);
         work->worklets.front()->output.configUpdate.push_back(std::move(csd));
 
         DUMP_TO_FILE(
@@ -1635,7 +1691,7 @@
         }
 
         error = setEncodeArgs(
-                &s_encode_ip, &s_encode_op, view.get(), wView.base(), wView.capacity(), workIndex);
+                ps_encode_ip, ps_encode_op, view.get(), wView.base(), wView.capacity(), workIndex);
         if (error != C2_OK) {
             ALOGE("setEncodeArgs failed : %d", error);
             mSignalledError = true;
@@ -1652,17 +1708,17 @@
         /* Compute time elapsed between end of previous decode()
          * to start of current decode() */
         TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
-        status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+        status = ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op);
 
         if (IV_SUCCESS != status) {
-            if ((s_encode_op.u4_error_code & 0xFF) == IH264E_BITSTREAM_BUFFER_OVERFLOW) {
+            if ((ps_encode_op->u4_error_code & 0xFF) == IH264E_BITSTREAM_BUFFER_OVERFLOW) {
                 // TODO: use IVE_CMD_CTL_GETBUFINFO for proper max input size?
                 mOutBufferSize *= 2;
                 mOutBlock.reset();
                 continue;
             }
             ALOGE("Encode Frame failed = 0x%x\n",
-                    s_encode_op.u4_error_code);
+                    ps_encode_op->u4_error_code);
             mSignalledError = true;
             work->result = C2_CORRUPTED;
             work->workletsProcessed = 1u;
@@ -1672,7 +1728,7 @@
 
     // Hold input buffer reference
     if (inputBuffer) {
-        mBuffers[s_encode_ip.s_inp_buf.apv_bufs[0]] = inputBuffer;
+        mBuffers[ps_encode_ip->s_inp_buf.apv_bufs[0]] = inputBuffer;
     }
 
     GETTIME(&mTimeEnd, nullptr);
@@ -1680,9 +1736,9 @@
     TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
 
     ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
-            s_encode_op.s_out_buf.u4_bytes);
+            ps_encode_op->s_out_buf.u4_bytes);
 
-    void *freed = s_encode_op.s_inp_buf.apv_bufs[0];
+    void *freed = ps_encode_op->s_inp_buf.apv_bufs[0];
     /* If encoder frees up an input buffer, mark it as free */
     if (freed != nullptr) {
         if (mBuffers.count(freed) == 0u) {
@@ -1694,17 +1750,17 @@
         }
     }
 
-    if (s_encode_op.output_present) {
-        if (!s_encode_op.s_out_buf.u4_bytes) {
+    if (ps_encode_op->output_present) {
+        if (!ps_encode_op->s_out_buf.u4_bytes) {
             ALOGE("Error: Output present but bytes generated is zero");
             mSignalledError = true;
             work->result = C2_CORRUPTED;
             work->workletsProcessed = 1u;
             return;
         }
-        uint64_t workId = ((uint64_t)s_encode_op.u4_timestamp_high << 32) |
-                      s_encode_op.u4_timestamp_low;
-        finishWork(workId, work, &s_encode_op);
+        uint64_t workId = ((uint64_t)ps_encode_op->u4_timestamp_high << 32) |
+                      ps_encode_op->u4_timestamp_low;
+        finishWork(workId, work, ps_encode_op);
     }
     if (mSawInputEOS) {
         drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -1744,9 +1800,11 @@
             ALOGE("graphic view map failed %d", wView.error());
             return C2_CORRUPTED;
         }
-        ive_video_encode_ip_t s_encode_ip;
-        ive_video_encode_op_t s_encode_op;
-        if (C2_OK != setEncodeArgs(&s_encode_ip, &s_encode_op, nullptr,
+        ih264e_video_encode_ip_t s_video_encode_ip = {};
+        ih264e_video_encode_op_t s_video_encode_op = {};
+        ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
+        ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
+        if (C2_OK != setEncodeArgs(ps_encode_ip, ps_encode_op, nullptr,
                                    wView.base(), wView.capacity(), 0)) {
             ALOGE("setEncodeArgs failed for drainInternal");
             mSignalledError = true;
@@ -1754,9 +1812,9 @@
             work->workletsProcessed = 1u;
             return C2_CORRUPTED;
         }
-        (void)ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+        (void)ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op);
 
-        void *freed = s_encode_op.s_inp_buf.apv_bufs[0];
+        void *freed = ps_encode_op->s_inp_buf.apv_bufs[0];
         /* If encoder frees up an input buffer, mark it as free */
         if (freed != nullptr) {
             if (mBuffers.count(freed) == 0u) {
@@ -1768,10 +1826,10 @@
             }
         }
 
-        if (s_encode_op.output_present) {
-            uint64_t workId = ((uint64_t)s_encode_op.u4_timestamp_high << 32) |
-                          s_encode_op.u4_timestamp_low;
-            finishWork(workId, work, &s_encode_op);
+        if (ps_encode_op->output_present) {
+            uint64_t workId = ((uint64_t)ps_encode_op->u4_timestamp_high << 32) |
+                          ps_encode_op->u4_timestamp_low;
+            finishWork(workId, work, ps_encode_op);
         } else {
             if (work->workletsProcessed != 1u) {
                 work->worklets.front()->output.flags = work->input.flags;
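
The encoder changes above pick between planar and semi-planar input by inspecting the chroma column increments and whether the interleaved U and V samples are adjacent (uPlane + 1 == vPlane for NV12, vPlane + 1 == uPlane for NV21). A standalone sketch of that classification; the function and test values are illustrative, not the codec's buffer handling:

    #include <cstdint>
    #include <cstdio>

    // For flexible YUV420, interleaved chroma with the U sample first (U,V,U,V,...)
    // is NV12; with the V sample first it is NV21; colInc == 1 means separate planes.
    enum class ChromaLayout { Planar, NV12, NV21, Unknown };

    static ChromaLayout classify(const uint8_t *u, const uint8_t *v,
                                 int uColInc, int vColInc) {
        if (uColInc == 1 && vColInc == 1) return ChromaLayout::Planar;
        if (uColInc == 2 && vColInc == 2 && u + 1 == v) return ChromaLayout::NV12;
        if (uColInc == 2 && vColInc == 2 && v + 1 == u) return ChromaLayout::NV21;
        return ChromaLayout::Unknown;  // fall back to a copy/convert path
    }

    int main() {
        uint8_t chroma[8] = {};
        printf("%d\n", classify(&chroma[0], &chroma[1], 2, 2) == ChromaLayout::NV12);  // 1
        printf("%d\n", classify(&chroma[1], &chroma[0], 2, 2) == ChromaLayout::NV21);  // 1
        return 0;
    }
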
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.h b/media/codec2/components/avc/C2SoftAvcEnc.h
index ee6d47a..673a282 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.h
+++ b/media/codec2/components/avc/C2SoftAvcEnc.h
@@ -24,8 +24,7 @@
 #include <SimpleC2Component.h>
 
 #include "ih264_typedefs.h"
-#include "iv2.h"
-#include "ive2.h"
+#include "ih264e.h"
 
 namespace android {
 
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index a374dfa..6bcf3a2 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -26,7 +26,6 @@
 #include <SimpleC2Interface.h>
 
 #include "C2SoftHevcDec.h"
-#include "ihevcd_cxa.h"
 
 namespace android {
 
@@ -380,12 +379,14 @@
     }
 
     while (true) {
-        ivd_video_decode_ip_t s_decode_ip;
-        ivd_video_decode_op_t s_decode_op;
+        ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+        ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+        ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+        ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
 
-        setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, nullptr, 0, 0, 0);
-        (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
-        if (0 == s_decode_op.u4_output_present) {
+        setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, nullptr, 0, 0, 0);
+        (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+        if (0 == ps_decode_op->u4_output_present) {
             resetPlugin();
             break;
         }
@@ -400,8 +401,8 @@
 }
 
 status_t C2SoftHevcDec::createDecoder() {
-    ivdext_create_ip_t s_create_ip;
-    ivdext_create_op_t s_create_op;
+    ivdext_create_ip_t s_create_ip = {};
+    ivdext_create_op_t s_create_op = {};
 
     s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
     s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
@@ -427,8 +428,8 @@
 }
 
 status_t C2SoftHevcDec::setNumCores() {
-    ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip;
-    ivdext_ctl_set_num_cores_op_t s_set_num_cores_op;
+    ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip = {};
+    ivdext_ctl_set_num_cores_op_t s_set_num_cores_op = {};
 
     s_set_num_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
     s_set_num_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -447,22 +448,26 @@
 }
 
 status_t C2SoftHevcDec::setParams(size_t stride, IVD_VIDEO_DECODE_MODE_T dec_mode) {
-    ivd_ctl_set_config_ip_t s_set_dyn_params_ip;
-    ivd_ctl_set_config_op_t s_set_dyn_params_op;
+    ihevcd_cxa_ctl_set_config_ip_t s_hevcd_set_dyn_params_ip = {};
+    ihevcd_cxa_ctl_set_config_op_t s_hevcd_set_dyn_params_op = {};
+    ivd_ctl_set_config_ip_t *ps_set_dyn_params_ip =
+        &s_hevcd_set_dyn_params_ip.s_ivd_ctl_set_config_ip_t;
+    ivd_ctl_set_config_op_t *ps_set_dyn_params_op =
+        &s_hevcd_set_dyn_params_op.s_ivd_ctl_set_config_op_t;
 
-    s_set_dyn_params_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
-    s_set_dyn_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
-    s_set_dyn_params_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
-    s_set_dyn_params_ip.u4_disp_wd = (UWORD32) stride;
-    s_set_dyn_params_ip.e_frm_skip_mode = IVD_SKIP_NONE;
-    s_set_dyn_params_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
-    s_set_dyn_params_ip.e_vid_dec_mode = dec_mode;
-    s_set_dyn_params_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+    ps_set_dyn_params_ip->u4_size = sizeof(ihevcd_cxa_ctl_set_config_ip_t);
+    ps_set_dyn_params_ip->e_cmd = IVD_CMD_VIDEO_CTL;
+    ps_set_dyn_params_ip->e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+    ps_set_dyn_params_ip->u4_disp_wd = (UWORD32) stride;
+    ps_set_dyn_params_ip->e_frm_skip_mode = IVD_SKIP_NONE;
+    ps_set_dyn_params_ip->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+    ps_set_dyn_params_ip->e_vid_dec_mode = dec_mode;
+    ps_set_dyn_params_op->u4_size = sizeof(ihevcd_cxa_ctl_set_config_op_t);
     IV_API_CALL_STATUS_T status = ivdec_api_function(mDecHandle,
-                                                     &s_set_dyn_params_ip,
-                                                     &s_set_dyn_params_op);
+                                                     ps_set_dyn_params_ip,
+                                                     ps_set_dyn_params_op);
     if (status != IV_SUCCESS) {
-        ALOGE("error in %s: 0x%x", __func__, s_set_dyn_params_op.u4_error_code);
+        ALOGE("error in %s: 0x%x", __func__, ps_set_dyn_params_op->u4_error_code);
         return UNKNOWN_ERROR;
     }
 
@@ -470,8 +475,8 @@
 }
 
 status_t C2SoftHevcDec::getVersion() {
-    ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip;
-    ivd_ctl_getversioninfo_op_t s_get_versioninfo_op;
+    ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip = {};
+    ivd_ctl_getversioninfo_op_t s_get_versioninfo_op = {};
     UWORD8 au1_buf[512];
 
     s_get_versioninfo_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
@@ -529,7 +534,7 @@
         if (OK != setParams(mStride, IVD_DECODE_FRAME)) return false;
     }
 
-    ps_decode_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+    ps_decode_ip->u4_size = sizeof(ihevcd_cxa_video_decode_ip_t);
     ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
     if (inBuffer) {
         ps_decode_ip->u4_ts = tsMarker;
@@ -558,15 +563,15 @@
         ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
     }
     ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
-    ps_decode_op->u4_size = sizeof(ivd_video_decode_op_t);
+    ps_decode_op->u4_size = sizeof(ihevcd_cxa_video_decode_op_t);
     ps_decode_op->u4_output_present = 0;
 
     return true;
 }
 
 bool C2SoftHevcDec::getVuiParams() {
-    ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip;
-    ivdext_ctl_get_vui_params_op_t s_get_vui_params_op;
+    ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip = {};
+    ivdext_ctl_get_vui_params_op_t s_get_vui_params_op = {};
 
     s_get_vui_params_ip.u4_size = sizeof(ivdext_ctl_get_vui_params_ip_t);
     s_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -614,8 +619,8 @@
 }
 
 status_t C2SoftHevcDec::setFlushMode() {
-    ivd_ctl_flush_ip_t s_set_flush_ip;
-    ivd_ctl_flush_op_t s_set_flush_op;
+    ivd_ctl_flush_ip_t s_set_flush_ip = {};
+    ivd_ctl_flush_op_t s_set_flush_op = {};
 
     s_set_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
     s_set_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -633,8 +638,8 @@
 }
 
 status_t C2SoftHevcDec::resetDecoder() {
-    ivd_ctl_reset_ip_t s_reset_ip;
-    ivd_ctl_reset_op_t s_reset_op;
+    ivd_ctl_reset_ip_t s_reset_ip = {};
+    ivd_ctl_reset_op_t s_reset_op = {};
 
     s_reset_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
     s_reset_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -662,8 +667,8 @@
 
 status_t C2SoftHevcDec::deleteDecoder() {
     if (mDecHandle) {
-        ivdext_delete_ip_t s_delete_ip;
-        ivdext_delete_op_t s_delete_op;
+        ivdext_delete_ip_t s_delete_ip = {};
+        ivdext_delete_op_t s_delete_op = {};
 
         s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
         s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
@@ -835,9 +840,11 @@
             work->result = wView.error();
             return;
         }
-        ivd_video_decode_ip_t s_decode_ip;
-        ivd_video_decode_op_t s_decode_op;
-        if (!setDecodeArgs(&s_decode_ip, &s_decode_op, &rView, &wView,
+        ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+        ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+        ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+        ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
+        if (!setDecodeArgs(ps_decode_ip, ps_decode_op, &rView, &wView,
                            inOffset + inPos, inSize - inPos, workIndex)) {
             mSignalledError = true;
             work->workletsProcessed = 1u;
@@ -852,26 +859,26 @@
         WORD32 delay;
         GETTIME(&mTimeStart, nullptr);
         TIME_DIFF(mTimeEnd, mTimeStart, delay);
-        (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+        (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
         WORD32 decodeTime;
         GETTIME(&mTimeEnd, nullptr);
         TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
         ALOGV("decodeTime=%6d delay=%6d numBytes=%6d", decodeTime, delay,
-              s_decode_op.u4_num_bytes_consumed);
-        if (IVD_MEM_ALLOC_FAILED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+              ps_decode_op->u4_num_bytes_consumed);
+        if (IVD_MEM_ALLOC_FAILED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
             ALOGE("allocation failure in decoder");
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
             return;
         } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED ==
-                   (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+                   (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
             ALOGE("unsupported resolution : %dx%d", mWidth, mHeight);
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
             return;
-        } else if (IVD_RES_CHANGED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+        } else if (IVD_RES_CHANGED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
             ALOGV("resolution changed");
             drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
             resetDecoder();
@@ -880,16 +887,16 @@
 
             /* Decode header and get new dimensions */
             setParams(mStride, IVD_DECODE_HEADER);
-            (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
-        } else if (IS_IVD_FATAL_ERROR(s_decode_op.u4_error_code)) {
-            ALOGE("Fatal error in decoder 0x%x", s_decode_op.u4_error_code);
+            (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+        } else if (IS_IVD_FATAL_ERROR(ps_decode_op->u4_error_code)) {
+            ALOGE("Fatal error in decoder 0x%x", ps_decode_op->u4_error_code);
             mSignalledError = true;
             work->workletsProcessed = 1u;
             work->result = C2_CORRUPTED;
             return;
         }
-        if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
-            mOutputDelay = s_decode_op.i4_reorder_depth;
+        if (ps_decode_op->i4_reorder_depth >= 0 && mOutputDelay != ps_decode_op->i4_reorder_depth) {
+            mOutputDelay = ps_decode_op->i4_reorder_depth;
             ALOGV("New Output delay %d ", mOutputDelay);
 
             C2PortActualDelayTuning::output outputDelay(mOutputDelay);
@@ -907,15 +914,15 @@
                 return;
             }
         }
-        if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
+        if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
             if (mHeaderDecoded == false) {
                 mHeaderDecoded = true;
-                setParams(ALIGN32(s_decode_op.u4_pic_wd), IVD_DECODE_FRAME);
+                setParams(ALIGN32(ps_decode_op->u4_pic_wd), IVD_DECODE_FRAME);
             }
-            if (s_decode_op.u4_pic_wd != mWidth ||  s_decode_op.u4_pic_ht != mHeight) {
-                mWidth = s_decode_op.u4_pic_wd;
-                mHeight = s_decode_op.u4_pic_ht;
-                CHECK_EQ(0u, s_decode_op.u4_output_present);
+            if (ps_decode_op->u4_pic_wd != mWidth ||  ps_decode_op->u4_pic_ht != mHeight) {
+                mWidth = ps_decode_op->u4_pic_wd;
+                mHeight = ps_decode_op->u4_pic_ht;
+                CHECK_EQ(0u, ps_decode_op->u4_output_present);
 
                 C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
                 std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -935,15 +942,15 @@
             }
         }
         (void) getVuiParams();
-        hasPicture |= (1 == s_decode_op.u4_frame_decoded_flag);
-        if (s_decode_op.u4_output_present) {
-            finishWork(s_decode_op.u4_ts, work);
+        hasPicture |= (1 == ps_decode_op->u4_frame_decoded_flag);
+        if (ps_decode_op->u4_output_present) {
+            finishWork(ps_decode_op->u4_ts, work);
         }
-        if (0 == s_decode_op.u4_num_bytes_consumed) {
+        if (0 == ps_decode_op->u4_num_bytes_consumed) {
             ALOGD("Bytes consumed is zero. Ignoring remaining bytes");
             break;
         }
-        inPos += s_decode_op.u4_num_bytes_consumed;
+        inPos += ps_decode_op->u4_num_bytes_consumed;
         if (hasPicture && (inSize - inPos)) {
             ALOGD("decoded frame in current access nal, ignoring further trailing bytes %d",
                   (int)inSize - (int)inPos);
@@ -985,16 +992,18 @@
             ALOGE("graphic view map failed %d", wView.error());
             return C2_CORRUPTED;
         }
-        ivd_video_decode_ip_t s_decode_ip;
-        ivd_video_decode_op_t s_decode_op;
-        if (!setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, &wView, 0, 0, 0)) {
+        ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+        ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+        ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+        ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
+        if (!setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, &wView, 0, 0, 0)) {
             mSignalledError = true;
             work->workletsProcessed = 1u;
             return C2_CORRUPTED;
         }
-        (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
-        if (s_decode_op.u4_output_present) {
-            finishWork(s_decode_op.u4_ts, work);
+        (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+        if (ps_decode_op->u4_output_present) {
+            finishWork(ps_decode_op->u4_ts, work);
         } else {
             fillEmptyWork(work);
             break;
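
Editor's note: the hunks above repeatedly swap stack ivd_* structures for their ihevcd_cxa_* counterparts, zero-initialize them, and hand ivdec_api_function() a pointer to the embedded ivd_* member while u4_size advertises the size of the outer struct. Below is a minimal sketch of that wrapper-struct pattern; the type and field names are simplified placeholders rather than the real libhevcdec definitions, and the assumption that the base struct sits at the start of the extended one only mirrors how the diff takes its address.

// Hypothetical sketch of the "extended struct wraps base struct" pattern.
#include <cstdint>
#include <cstdio>

struct ivd_ip_base {        // stand-in for ivd_video_decode_ip_t
    uint32_t u4_size;
    uint32_t u4_ts;
};

struct hevcd_ip_ext {       // stand-in for ihevcd_cxa_video_decode_ip_t
    ivd_ip_base s_ivd_ip;   // embedded base struct, addressed by the caller
    uint32_t u4_extra_field;
};

static void decode(ivd_ip_base* ip) {
    // A callee can tell whether extension fields are present from u4_size.
    std::printf("size=%u ts=%u\n", (unsigned)ip->u4_size, (unsigned)ip->u4_ts);
}

int main() {
    hevcd_ip_ext s_ip = {};                 // zero-init also clears the extension fields
    ivd_ip_base* ps_ip = &s_ip.s_ivd_ip;    // alias the embedded base struct
    ps_ip->u4_size = sizeof(hevcd_ip_ext);  // advertise the extended size
    ps_ip->u4_ts = 42;
    decode(ps_ip);
    return 0;
}

Zero-initializing the outer struct is what guarantees that any extension fields the decoder may now read are not left as stack garbage.
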
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.h b/media/codec2/components/hevc/C2SoftHevcDec.h
index 600d7c1..b9b0a48 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.h
+++ b/media/codec2/components/hevc/C2SoftHevcDec.h
@@ -23,8 +23,7 @@
 #include <SimpleC2Component.h>
 
 #include "ihevc_typedefs.h"
-#include "iv.h"
-#include "ivd.h"
+#include "ihevcd_cxa.h"
 
 namespace android {
 
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
new file mode 120000
index 0000000..136279c
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/.clang-format
@@ -0,0 +1 @@
+../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 1f95eaf..efc5813 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -33,11 +33,11 @@
 using android::C2AllocatorIon;
 
 #include "media_c2_hidl_test_common.h"
+using DecodeTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<DecodeTestParameters> kDecodeTestParameters;
 
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
-        kDecodeTestParameters;
-
-static std::vector<std::tuple<std::string, std::string, std::string>> kCsdFlushTestParameters;
+using CsdFlushTestParameters = std::tuple<std::string, std::string, bool>;
+static std::vector<CsdFlushTestParameters> kCsdFlushTestParameters;
 
 struct CompToURL {
     std::string mime;
@@ -46,36 +46,26 @@
 };
 
 std::vector<CompToURL> kCompToURL = {
-    {"mp4a-latm",
-     "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.info"},
-    {"mp4a-latm",
-     "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"},
-    {"audio/mpeg",
-     "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz.info"},
-    {"audio/mpeg",
-     "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"},
-    {"3gpp",
-     "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz.info"},
-    {"3gpp",
-     "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"},
-    {"amr-wb",
-     "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz.info"},
-    {"amr-wb",
-     "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"},
-    {"vorbis",
-     "bbb_vorbis_stereo_128kbps_48000hz.vorbis", "bbb_vorbis_stereo_128kbps_48000hz.info"},
-    {"opus",
-     "bbb_opus_stereo_128kbps_48000hz.opus", "bbb_opus_stereo_128kbps_48000hz.info"},
-    {"g711-alaw",
-     "bbb_g711alaw_1ch_8khz.raw", "bbb_g711alaw_1ch_8khz.info"},
-    {"g711-mlaw",
-     "bbb_g711mulaw_1ch_8khz.raw", "bbb_g711mulaw_1ch_8khz.info"},
-    {"gsm",
-     "bbb_gsm_1ch_8khz_13kbps.raw", "bbb_gsm_1ch_8khz_13kbps.info"},
-    {"raw",
-     "bbb_raw_1ch_8khz_s32le.raw", "bbb_raw_1ch_8khz_s32le.info"},
-    {"flac",
-     "bbb_flac_stereo_680kbps_48000hz.flac", "bbb_flac_stereo_680kbps_48000hz.info"},
+        {"mp4a-latm", "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.info"},
+        {"mp4a-latm", "bbb_aac_stereo_128kbps_48000hz.aac",
+         "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"},
+        {"audio/mpeg", "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz.info"},
+        {"audio/mpeg", "bbb_mp3_stereo_192kbps_48000hz.mp3",
+         "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"},
+        {"3gpp", "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz.info"},
+        {"3gpp", "sine_amrnb_1ch_12kbps_8000hz.amrnb",
+         "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"},
+        {"amr-wb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz.info"},
+        {"amr-wb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb",
+         "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"},
+        {"vorbis", "bbb_vorbis_stereo_128kbps_48000hz.vorbis",
+         "bbb_vorbis_stereo_128kbps_48000hz.info"},
+        {"opus", "bbb_opus_stereo_128kbps_48000hz.opus", "bbb_opus_stereo_128kbps_48000hz.info"},
+        {"g711-alaw", "bbb_g711alaw_1ch_8khz.raw", "bbb_g711alaw_1ch_8khz.info"},
+        {"g711-mlaw", "bbb_g711mulaw_1ch_8khz.raw", "bbb_g711mulaw_1ch_8khz.info"},
+        {"gsm", "bbb_gsm_1ch_8khz_13kbps.raw", "bbb_gsm_1ch_8khz_13kbps.info"},
+        {"raw", "bbb_raw_1ch_8khz_s32le.raw", "bbb_raw_1ch_8khz_s32le.info"},
+        {"flac", "bbb_flac_stereo_680kbps_48000hz.flac", "bbb_flac_stereo_680kbps_48000hz.info"},
 };
 
 class LinearBuffer : public C2Buffer {
@@ -212,9 +202,8 @@
     }
 };
 
-class Codec2AudioDecHidlTest
-    : public Codec2AudioDecHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2AudioDecHidlTest : public Codec2AudioDecHidlTestBase,
+                               public ::testing::WithParamInterface<TestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -438,10 +427,8 @@
     ASSERT_EQ(mComponent->stop(), C2_OK);
 }
 
-class Codec2AudioDecDecodeTest
-    : public Codec2AudioDecHidlTestBase,
-      public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2AudioDecDecodeTest : public Codec2AudioDecHidlTestBase,
+                                 public ::testing::WithParamInterface<DecodeTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -452,9 +439,8 @@
     description("Decodes input file");
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
 
-    uint32_t streamIndex = std::stoi(std::get<2>(GetParam()));
-    ;
-    bool signalEOS = !std::get<3>(GetParam()).compare("true");
+    uint32_t streamIndex = std::get<2>(GetParam());
+    bool signalEOS = std::get<3>(GetParam());
     mTimestampDevTest = true;
     char mURL[512], info[512];
     android::Vector<FrameInfo> Info;
@@ -771,9 +757,8 @@
     ASSERT_EQ(mComponent->stop(), C2_OK);
 }
 
-class Codec2AudioDecCsdInputTests
-    : public Codec2AudioDecHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {
+class Codec2AudioDecCsdInputTests : public Codec2AudioDecHidlTestBase,
+                                    public ::testing::WithParamInterface<CsdFlushTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -819,7 +804,7 @@
     ASSERT_EQ(eleStream.is_open(), true);
 
     bool signalEOS = false;
-    bool flushCsd = !std::get<2>(GetParam()).compare("true");
+    bool flushCsd = std::get<2>(GetParam());
     ALOGV("sending %d csd data ", numCsds);
     int framesToDecode = numCsds;
     ASSERT_NO_FATAL_FAILURE(decodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
@@ -875,16 +860,16 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2AudioDecHidlTest, testing::ValuesIn(kTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 // DecodeTest with StreamIndex and EOS / No EOS
 INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2AudioDecDecodeTest,
                          testing::ValuesIn(kDecodeTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 INSTANTIATE_TEST_SUITE_P(CsdInputs, Codec2AudioDecCsdInputTests,
                          testing::ValuesIn(kCsdFlushTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 }  // anonymous namespace
 
@@ -893,18 +878,18 @@
     kTestParameters = getTestParameters(C2Component::DOMAIN_AUDIO, C2Component::KIND_DECODER);
     for (auto params : kTestParameters) {
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 1, false));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 1, true));
 
         kCsdFlushTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), true));
         kCsdFlushTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), false));
     }
 
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
index 1445e59..562c77f 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
@@ -35,8 +35,9 @@
 
 #include "media_c2_hidl_test_common.h"
 
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
-        kEncodeTestParameters;
+using EncodeTestParameters = std::tuple<std::string, std::string, bool, int32_t>;
+
+static std::vector<EncodeTestParameters> kEncodeTestParameters;
 
 class LinearBuffer : public C2Buffer {
   public:
@@ -72,30 +73,17 @@
         mLinearPool = std::make_shared<C2PooledBlockPool>(mLinearAllocator, mBlockPoolId++);
         ASSERT_NE(mLinearPool, nullptr);
 
-        mCompName = unknown_comp;
-        struct StringToName {
-            const char* Name;
-            standardComp CompName;
-        };
-        const StringToName kStringToName[] = {
-                {"aac", aac}, {"flac", flac}, {"opus", opus}, {"amrnb", amrnb}, {"amrwb", amrwb},
-        };
-        const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
+        std::vector<std::unique_ptr<C2Param>> queried;
+        mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
+                          &queried);
+        ASSERT_GT(queried.size(), 0);
 
-        // Find the component type
-        for (size_t i = 0; i < kNumStringToName; ++i) {
-            if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
-                mCompName = kStringToName[i].CompName;
-                break;
-            }
-        }
+        mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
         mEos = false;
         mCsd = false;
         mFramesReceived = 0;
         mWorkResult = C2_OK;
         mOutputSize = 0u;
-        if (mCompName == unknown_comp) mDisableTest = true;
-        if (mDisableTest) std::cout << "[   WARN   ] Test Disabled \n";
         getInputMaxBufSize();
     }
 
@@ -110,6 +98,8 @@
     // Get the test parameters from GetParam call.
     virtual void getParams() {}
 
+    void GetURLForComponent(char* mURL);
+
     // callback function to process onWorkDone received by Listener
     void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
         for (std::unique_ptr<C2Work>& work : workItems) {
@@ -130,21 +120,13 @@
             }
         }
     }
-    enum standardComp {
-        aac,
-        flac,
-        opus,
-        amrnb,
-        amrwb,
-        unknown_comp,
-    };
 
+    std::string mMime;
     std::string mInstanceName;
     std::string mComponentName;
     bool mEos;
     bool mCsd;
     bool mDisableTest;
-    standardComp mCompName;
 
     int32_t mWorkResult;
     uint32_t mFramesReceived;
@@ -189,9 +171,8 @@
     }
 };
 
-class Codec2AudioEncHidlTest
-    : public Codec2AudioEncHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2AudioEncHidlTest : public Codec2AudioEncHidlTestBase,
+                               public ::testing::WithParamInterface<TestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -199,7 +180,7 @@
 };
 
 void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
-                       Codec2AudioEncHidlTest::standardComp compName, bool& disableTest) {
+                       bool& disableTest) {
     // Validate its a C2 Component
     if (component->getName().find("c2") == std::string::npos) {
         ALOGE("Not a c2 component");
@@ -226,13 +207,6 @@
             return;
         }
     }
-
-    // Validates component name
-    if (compName == Codec2AudioEncHidlTest::unknown_comp) {
-        ALOGE("Component InValid");
-        disableTest = true;
-        return;
-    }
     ALOGV("Component Valid");
 }
 
@@ -250,56 +224,48 @@
 }
 
 // Get config params for a component
-bool getConfigParams(Codec2AudioEncHidlTest::standardComp compName, int32_t* nChannels,
-                     int32_t* nSampleRate, int32_t* samplesPerFrame) {
-    switch (compName) {
-        case Codec2AudioEncHidlTest::aac:
-            *nChannels = 2;
-            *nSampleRate = 48000;
-            *samplesPerFrame = 1024;
-            break;
-        case Codec2AudioEncHidlTest::flac:
-            *nChannels = 2;
-            *nSampleRate = 48000;
-            *samplesPerFrame = 1152;
-            break;
-        case Codec2AudioEncHidlTest::opus:
-            *nChannels = 2;
-            *nSampleRate = 48000;
-            *samplesPerFrame = 960;
-            break;
-        case Codec2AudioEncHidlTest::amrnb:
-            *nChannels = 1;
-            *nSampleRate = 8000;
-            *samplesPerFrame = 160;
-            break;
-        case Codec2AudioEncHidlTest::amrwb:
-            *nChannels = 1;
-            *nSampleRate = 16000;
-            *samplesPerFrame = 160;
-            break;
-        default:
-            return false;
-    }
+bool getConfigParams(std::string mime, int32_t* nChannels, int32_t* nSampleRate,
+                     int32_t* samplesPerFrame) {
+    if (mime.find("mp4a-latm") != std::string::npos) {
+        *nChannels = 2;
+        *nSampleRate = 48000;
+        *samplesPerFrame = 1024;
+    } else if (mime.find("flac") != std::string::npos) {
+        *nChannels = 2;
+        *nSampleRate = 48000;
+        *samplesPerFrame = 1152;
+    } else if (mime.find("opus") != std::string::npos) {
+        *nChannels = 2;
+        *nSampleRate = 48000;
+        *samplesPerFrame = 960;
+    } else if (mime.find("3gpp") != std::string::npos) {
+        *nChannels = 1;
+        *nSampleRate = 8000;
+        *samplesPerFrame = 160;
+    } else if (mime.find("amr-wb") != std::string::npos) {
+        *nChannels = 1;
+        *nSampleRate = 16000;
+        *samplesPerFrame = 160;
+    } else
+        return false;
+
     return true;
 }
 
 // LookUpTable of clips and metadata for component testing
-void GetURLForComponent(Codec2AudioEncHidlTest::standardComp comp, char* mURL) {
+void Codec2AudioEncHidlTestBase::GetURLForComponent(char* mURL) {
     struct CompToURL {
-        Codec2AudioEncHidlTest::standardComp comp;
+        std::string mime;
         const char* mURL;
     };
     static const CompToURL kCompToURL[] = {
-            {Codec2AudioEncHidlTest::standardComp::aac, "bbb_raw_2ch_48khz_s16le.raw"},
-            {Codec2AudioEncHidlTest::standardComp::amrnb, "bbb_raw_1ch_8khz_s16le.raw"},
-            {Codec2AudioEncHidlTest::standardComp::amrwb, "bbb_raw_1ch_16khz_s16le.raw"},
-            {Codec2AudioEncHidlTest::standardComp::flac, "bbb_raw_2ch_48khz_s16le.raw"},
-            {Codec2AudioEncHidlTest::standardComp::opus, "bbb_raw_2ch_48khz_s16le.raw"},
+            {"mp4a-latm", "bbb_raw_2ch_48khz_s16le.raw"}, {"3gpp", "bbb_raw_1ch_8khz_s16le.raw"},
+            {"amr-wb", "bbb_raw_1ch_16khz_s16le.raw"},    {"flac", "bbb_raw_2ch_48khz_s16le.raw"},
+            {"opus", "bbb_raw_2ch_48khz_s16le.raw"},
     };
 
     for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
-        if (kCompToURL[i].comp == comp) {
+        if (mMime.find(kCompToURL[i].mime) != std::string::npos) {
             strcat(mURL, kCompToURL[i].mURL);
             return;
         }
@@ -392,14 +358,12 @@
 TEST_P(Codec2AudioEncHidlTest, validateCompName) {
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     ALOGV("Checks if the given component is a valid audio component");
-    validateComponent(mComponent, mCompName, mDisableTest);
+    validateComponent(mComponent, mDisableTest);
     ASSERT_EQ(mDisableTest, false);
 }
 
-class Codec2AudioEncEncodeTest
-    : public Codec2AudioEncHidlTestBase,
-      public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2AudioEncEncodeTest : public Codec2AudioEncHidlTestBase,
+                                 public ::testing::WithParamInterface<EncodeTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -411,17 +375,17 @@
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     char mURL[512];
     strcpy(mURL, sResourceDir.c_str());
-    GetURLForComponent(mCompName, mURL);
-    bool signalEOS = !std::get<2>(GetParam()).compare("true");
+    GetURLForComponent(mURL);
+    bool signalEOS = std::get<2>(GetParam());
     // Ratio w.r.t to mInputMaxBufSize
-    int32_t inputMaxBufRatio = std::stoi(std::get<3>(GetParam()));
+    int32_t inputMaxBufRatio = std::get<3>(GetParam());
 
     int32_t nChannels;
     int32_t nSampleRate;
     int32_t samplesPerFrame;
 
-    if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
-        std::cout << "Failed to get the config params for " << mCompName << " component\n";
+    if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+        std::cout << "Failed to get the config params for " << mComponentName << "\n";
         std::cout << "[   WARN   ] Test Skipped \n";
         return;
     }
@@ -461,11 +425,9 @@
         ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
         ASSERT_TRUE(false);
     }
-    if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
-        if (!mCsd) {
-            ALOGE("CSD buffer missing");
-            ASSERT_TRUE(false);
-        }
+    if ((mMime.find("flac") != std::string::npos) || (mMime.find("opus") != std::string::npos) ||
+        (mMime.find("mp4a-latm") != std::string::npos)) {
+        ASSERT_TRUE(mCsd) << "CSD buffer missing";
     }
     ASSERT_EQ(mEos, true);
     ASSERT_EQ(mComponent->stop(), C2_OK);
@@ -519,15 +481,15 @@
 
     char mURL[512];
     strcpy(mURL, sResourceDir.c_str());
-    GetURLForComponent(mCompName, mURL);
+    GetURLForComponent(mURL);
 
     mFlushedIndices.clear();
     int32_t nChannels;
     int32_t nSampleRate;
     int32_t samplesPerFrame;
 
-    if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
-        std::cout << "Failed to get the config params for " << mCompName << " component\n";
+    if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+        std::cout << "Failed to get the config params for " << mComponentName << "\n";
         std::cout << "[   WARN   ] Test Skipped \n";
         return;
     }
@@ -584,7 +546,7 @@
 
     char mURL[512];
     strcpy(mURL, sResourceDir.c_str());
-    GetURLForComponent(mCompName, mURL);
+    GetURLForComponent(mURL);
 
     std::ifstream eleStream;
     eleStream.open(mURL, std::ifstream::binary);
@@ -597,8 +559,8 @@
     int32_t numFrames = 16;
     int32_t maxChannelCount = 8;
 
-    if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
-        std::cout << "Failed to get the config params for " << mCompName << " component\n";
+    if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+        std::cout << "Failed to get the config params for " << mComponentName << "\n";
         std::cout << "[   WARN   ] Test Skipped \n";
         return;
     }
@@ -608,7 +570,7 @@
 
     // Looping through the maximum number of channel count supported by encoder
     for (nChannels = 1; nChannels < maxChannelCount; nChannels++) {
-        ALOGV("Configuring %u encoder for channel count = %d", mCompName, nChannels);
+        ALOGV("Configuring encoder %s  for channel count = %d", mComponentName.c_str(), nChannels);
         if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
             std::cout << "[   WARN   ] Test Skipped \n";
             return;
@@ -665,7 +627,9 @@
             ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
             ASSERT_TRUE(false);
         }
-        if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
+        if ((mMime.find("flac") != std::string::npos) ||
+            (mMime.find("opus") != std::string::npos) ||
+            (mMime.find("mp4a-latm") != std::string::npos)) {
             ASSERT_TRUE(mCsd) << "CSD buffer missing";
         }
         ASSERT_TRUE(mEos);
@@ -684,7 +648,7 @@
 
     char mURL[512];
     strcpy(mURL, sResourceDir.c_str());
-    GetURLForComponent(mCompName, mURL);
+    GetURLForComponent(mURL);
 
     std::ifstream eleStream;
     eleStream.open(mURL, std::ifstream::binary);
@@ -696,8 +660,8 @@
     int32_t nChannels;
     int32_t numFrames = 16;
 
-    if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
-        std::cout << "Failed to get the config params for " << mCompName << " component\n";
+    if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+        std::cout << "Failed to get the config params for " << mComponentName << "\n";
         std::cout << "[   WARN   ] Test Skipped \n";
         return;
     }
@@ -708,7 +672,7 @@
     uint32_t prevSampleRate = 0u;
 
     for (int32_t nSampleRate : sampleRateValues) {
-        ALOGV("Configuring %u encoder for SampleRate = %d", mCompName, nSampleRate);
+        ALOGV("Configuring encoder %s  for SampleRate = %d", mComponentName.c_str(), nSampleRate);
         if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
             std::cout << "[   WARN   ] Test Skipped \n";
             return;
@@ -769,7 +733,9 @@
             ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
             ASSERT_TRUE(false);
         }
-        if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
+        if ((mMime.find("flac") != std::string::npos) ||
+            (mMime.find("opus") != std::string::npos) ||
+            (mMime.find("mp4a-latm") != std::string::npos)) {
             ASSERT_TRUE(mCsd) << "CSD buffer missing";
         }
         ASSERT_TRUE(mEos);
@@ -783,13 +749,13 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2AudioEncHidlTest, testing::ValuesIn(kTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 // EncodeTest with EOS / No EOS and inputMaxBufRatio
 // inputMaxBufRatio is ratio w.r.t. to mInputMaxBufSize
 INSTANTIATE_TEST_SUITE_P(EncodeTest, Codec2AudioEncEncodeTest,
                          testing::ValuesIn(kEncodeTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 }  // anonymous namespace
 
@@ -798,13 +764,13 @@
     kTestParameters = getTestParameters(C2Component::DOMAIN_AUDIO, C2Component::KIND_ENCODER);
     for (auto params : kTestParameters) {
         kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "1"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), false, 1));
         kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "2"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), false, 2));
         kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "1"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), true, 1));
         kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "2"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), true, 2));
     }
 
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index de34705..1f1681d 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -27,13 +27,13 @@
 std::string sComponentNamePrefix = "";
 
 static constexpr struct option kArgOptions[] = {
-    {"res", required_argument, 0, 'P'},
-    {"prefix", required_argument, 0, 'p'},
-    {"help", required_argument, 0, 'h'},
-    {nullptr, 0, nullptr, 0},
+        {"res", required_argument, 0, 'P'},
+        {"prefix", required_argument, 0, 'p'},
+        {"help", required_argument, 0, 'h'},
+        {nullptr, 0, nullptr, 0},
 };
 
-void printUsage(char *me) {
+void printUsage(char* me) {
     std::cerr << "VTS tests to test codec2 components \n";
     std::cerr << "Usage: " << me << " [options] \n";
     std::cerr << "\t -P,  --res:    Mandatory path to a folder that contains test resources \n";
@@ -49,17 +49,17 @@
     int option_index;
     while ((arg = getopt_long(argc, argv, ":P:p:h", kArgOptions, &option_index)) != -1) {
         switch (arg) {
-        case 'P':
-            sResourceDir = optarg;
-            break;
-        case 'p':
-            sComponentNamePrefix = optarg;
-            break;
-        case 'h':
-            printUsage(argv[0]);
-            break;
-        default:
-            break;
+            case 'P':
+                sResourceDir = optarg;
+                break;
+            case 'p':
+                sComponentNamePrefix = optarg;
+                break;
+            case 'h':
+                printUsage(argv[0]);
+                break;
+            default:
+                break;
         }
     }
 }
@@ -134,8 +134,7 @@
         for (size_t i = 0; i < updates.size(); ++i) {
             C2Param* param = updates[i].get();
             if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
-                C2StreamInitDataInfo::output* csdBuffer =
-                        (C2StreamInitDataInfo::output*)(param);
+                C2StreamInitDataInfo::output* csdBuffer = (C2StreamInitDataInfo::output*)(param);
                 size_t csdSize = csdBuffer->flexCount();
                 if (csdSize > 0) csd = true;
             } else if ((param->index() == C2StreamSampleRateInfo::output::PARAM_TYPE) ||
@@ -160,8 +159,7 @@
             typedef std::unique_lock<std::mutex> ULock;
             ULock l(queueLock);
             workQueue.push_back(std::move(work));
-            if (!flushedIndices.empty() &&
-                (frameIndexIt != flushedIndices.end())) {
+            if (!flushedIndices.empty() && (frameIndexIt != flushedIndices.end())) {
                 flushedIndices.erase(frameIndexIt);
             }
             queueCondition.notify_all();
@@ -178,15 +176,15 @@
 }
 
 // Return all test parameters, a list of tuple of <instance, component>
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters() {
+const std::vector<TestParameters>& getTestParameters() {
     return getTestParameters(C2Component::DOMAIN_OTHER, C2Component::KIND_OTHER);
 }
 
 // Return all test parameters, a list of tuple of <instance, component> with matching domain and
 // kind.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters(
-        C2Component::domain_t domain, C2Component::kind_t kind) {
-    static std::vector<std::tuple<std::string, std::string>> parameters;
+const std::vector<TestParameters>& getTestParameters(C2Component::domain_t domain,
+                                                     C2Component::kind_t kind) {
+    static std::vector<TestParameters> parameters;
 
     auto instances = android::Codec2Client::GetServiceNames();
     for (std::string instance : instances) {
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index a2f1561..e74f247 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -40,7 +40,8 @@
 
 using namespace ::std::chrono;
 
-static std::vector<std::tuple<std::string, std::string>> kTestParameters;
+using TestParameters = std::tuple<std::string, std::string>;
+static std::vector<TestParameters> kTestParameters;
 
 // Resource directory
 extern std::string sResourceDir;
@@ -54,6 +55,18 @@
     int64_t timestamp;
 };
 
+template <typename... T>
+static inline std::string PrintInstanceTupleNameToString(
+        const testing::TestParamInfo<std::tuple<T...>>& info) {
+    std::stringstream ss;
+    std::apply([&ss](auto&&... elems) { ((ss << elems << '_'), ...); }, info.param);
+    ss << info.index;
+    std::string param_string = ss.str();
+    auto isNotAlphaNum = [](char c) { return !std::isalnum(c); };
+    std::replace_if(param_string.begin(), param_string.end(), isNotAlphaNum, '_');
+    return param_string;
+}
+
 /*
  * Handle Callback functions onWorkDone(), onTripped(),
  * onError(), onDeath(), onFramesRendered()
@@ -114,12 +127,12 @@
 void parseArgs(int argc, char** argv);
 
 // Return all test parameters, a list of tuple of <instance, component>.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters();
+const std::vector<TestParameters>& getTestParameters();
 
 // Return all test parameters, a list of tuple of <instance, component> with matching domain and
 // kind.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters(
-        C2Component::domain_t domain, C2Component::kind_t kind);
+const std::vector<TestParameters>& getTestParameters(C2Component::domain_t domain,
+                                                     C2Component::kind_t kind);
 
 /*
  * common functions declarations
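
Editor's note: the PrintInstanceTupleNameToString<> helper added above builds gtest parameter names by streaming every tuple element, appending the parameter index, and replacing non-alphanumeric characters with underscores. The standalone sketch below reproduces that logic outside of gtest so the resulting names are easy to predict; the explicit index argument stands in for gtest's info.index.

// Sketch of tuple-to-test-name conversion using std::apply, as in the helper above.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <sstream>
#include <string>
#include <tuple>

template <typename... T>
std::string tupleToTestName(const std::tuple<T...>& params, size_t index) {
    std::stringstream ss;
    std::apply([&ss](auto&&... elems) { ((ss << elems << '_'), ...); }, params);
    ss << index;
    std::string name = ss.str();
    // gtest only allows alphanumerics and '_' in test names.
    std::replace_if(name.begin(), name.end(),
                    [](unsigned char c) { return !std::isalnum(c); }, '_');
    return name;
}

int main() {
    auto params = std::make_tuple(std::string("default"),
                                  std::string("c2.android.aac.decoder"), 0u, false);
    // Prints: default_c2_android_aac_decoder_0_0_3
    std::cout << tupleToTestName(params, 3) << "\n";
    return 0;
}
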
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index 0648dd9..29acd33 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -53,9 +53,8 @@
     }
 
 namespace {
-
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
-        kInputTestParameters;
+using InputTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<InputTestParameters> kInputTestParameters;
 
 // google.codec2 Component test setup
 class Codec2ComponentHidlTestBase : public ::testing::Test {
@@ -120,9 +119,8 @@
     }
 };
 
-class Codec2ComponentHidlTest
-    : public Codec2ComponentHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2ComponentHidlTest : public Codec2ComponentHidlTestBase,
+                                public ::testing::WithParamInterface<TestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -317,10 +315,8 @@
     ASSERT_EQ(err, C2_OK);
 }
 
-class Codec2ComponentInputTests
-    : public Codec2ComponentHidlTestBase,
-      public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2ComponentInputTests : public Codec2ComponentHidlTestBase,
+                                  public ::testing::WithParamInterface<InputTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -330,8 +326,8 @@
 TEST_P(Codec2ComponentInputTests, InputBufferTest) {
     description("Tests for different inputs");
 
-    uint32_t flags = std::stoul(std::get<2>(GetParam()));
-    bool isNullBuffer = !std::get<3>(GetParam()).compare("true");
+    uint32_t flags = std::get<2>(GetParam());
+    bool isNullBuffer = std::get<3>(GetParam());
     if (isNullBuffer)
         ALOGD("Testing for null input buffer with flag : %u", flags);
     else
@@ -350,11 +346,10 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2ComponentHidlTest, testing::ValuesIn(kTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 INSTANTIATE_TEST_CASE_P(NonStdInputs, Codec2ComponentInputTests,
-                        testing::ValuesIn(kInputTestParameters),
-                        android::hardware::PrintInstanceTupleNameToString<>);
+                        testing::ValuesIn(kInputTestParameters), PrintInstanceTupleNameToString<>);
 }  // anonymous namespace
 
 // TODO: Add test for Invalid work,
@@ -364,18 +359,15 @@
     kTestParameters = getTestParameters();
     for (auto params : kTestParameters) {
         kInputTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
+        kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+                                                       C2FrameData::FLAG_END_OF_STREAM, true));
         kInputTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params),
-                                std::to_string(C2FrameData::FLAG_END_OF_STREAM), "true"));
-        kInputTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
-        kInputTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params),
-                                std::to_string(C2FrameData::FLAG_CODEC_CONFIG), "false"));
-        kInputTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params),
-                                std::to_string(C2FrameData::FLAG_END_OF_STREAM), "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
+        kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+                                                       C2FrameData::FLAG_CODEC_CONFIG, false));
+        kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+                                                       C2FrameData::FLAG_END_OF_STREAM, false));
     }
 
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index f29da0e..d0a1c31 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -40,10 +40,11 @@
 #include "media_c2_hidl_test_common.h"
 #include "media_c2_video_hidl_test_common.h"
 
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
-        kDecodeTestParameters;
+using DecodeTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<DecodeTestParameters> kDecodeTestParameters;
 
-static std::vector<std::tuple<std::string, std::string, std::string>> kCsdFlushTestParameters;
+using CsdFlushTestParameters = std::tuple<std::string, std::string, bool>;
+static std::vector<CsdFlushTestParameters> kCsdFlushTestParameters;
 
 struct CompToURL {
     std::string mime;
@@ -52,43 +53,30 @@
     std::string chksum;
 };
 std::vector<CompToURL> kCompToURL = {
-    {"avc",
-     "bbb_avc_176x144_300kbps_60fps.h264", "bbb_avc_176x144_300kbps_60fps.info",
-     "bbb_avc_176x144_300kbps_60fps_chksum.md5"},
-    {"avc",
-     "bbb_avc_640x360_768kbps_30fps.h264", "bbb_avc_640x360_768kbps_30fps.info",
-     "bbb_avc_640x360_768kbps_30fps_chksum.md5"},
-    {"hevc",
-     "bbb_hevc_176x144_176kbps_60fps.hevc", "bbb_hevc_176x144_176kbps_60fps.info",
-     "bbb_hevc_176x144_176kbps_60fps_chksum.md5"},
-    {"hevc",
-     "bbb_hevc_640x360_1600kbps_30fps.hevc", "bbb_hevc_640x360_1600kbps_30fps.info",
-     "bbb_hevc_640x360_1600kbps_30fps_chksum.md5"},
-    {"mpeg2",
-     "bbb_mpeg2_176x144_105kbps_25fps.m2v", "bbb_mpeg2_176x144_105kbps_25fps.info", ""},
-    {"mpeg2",
-     "bbb_mpeg2_352x288_1mbps_60fps.m2v","bbb_mpeg2_352x288_1mbps_60fps.info", ""},
-    {"3gpp",
-     "bbb_h263_352x288_300kbps_12fps.h263", "bbb_h263_352x288_300kbps_12fps.info", ""},
-    {"mp4v-es",
-     "bbb_mpeg4_352x288_512kbps_30fps.m4v", "bbb_mpeg4_352x288_512kbps_30fps.info", ""},
-    {"vp8",
-     "bbb_vp8_176x144_240kbps_60fps.vp8", "bbb_vp8_176x144_240kbps_60fps.info", ""},
-    {"vp8",
-     "bbb_vp8_640x360_2mbps_30fps.vp8", "bbb_vp8_640x360_2mbps_30fps.info",
-     "bbb_vp8_640x360_2mbps_30fps_chksm.md5"},
-    {"vp9",
-     "bbb_vp9_176x144_285kbps_60fps.vp9", "bbb_vp9_176x144_285kbps_60fps.info", ""},
-    {"vp9",
-     "bbb_vp9_640x360_1600kbps_30fps.vp9", "bbb_vp9_640x360_1600kbps_30fps.info",
-     "bbb_vp9_640x360_1600kbps_30fps_chksm.md5"},
-    {"vp9",
-     "bbb_vp9_704x480_280kbps_24fps_altref_2.vp9",
-     "bbb_vp9_704x480_280kbps_24fps_altref_2.info", ""},
-    {"av01",
-     "bbb_av1_640_360.av1", "bbb_av1_640_360.info", "bbb_av1_640_360_chksum.md5"},
-    {"av01",
-     "bbb_av1_176_144.av1", "bbb_av1_176_144.info", "bbb_av1_176_144_chksm.md5"},
+        {"avc", "bbb_avc_176x144_300kbps_60fps.h264", "bbb_avc_176x144_300kbps_60fps.info",
+         "bbb_avc_176x144_300kbps_60fps_chksum.md5"},
+        {"avc", "bbb_avc_640x360_768kbps_30fps.h264", "bbb_avc_640x360_768kbps_30fps.info",
+         "bbb_avc_640x360_768kbps_30fps_chksum.md5"},
+        {"hevc", "bbb_hevc_176x144_176kbps_60fps.hevc", "bbb_hevc_176x144_176kbps_60fps.info",
+         "bbb_hevc_176x144_176kbps_60fps_chksum.md5"},
+        {"hevc", "bbb_hevc_640x360_1600kbps_30fps.hevc", "bbb_hevc_640x360_1600kbps_30fps.info",
+         "bbb_hevc_640x360_1600kbps_30fps_chksum.md5"},
+        {"mpeg2", "bbb_mpeg2_176x144_105kbps_25fps.m2v", "bbb_mpeg2_176x144_105kbps_25fps.info",
+         ""},
+        {"mpeg2", "bbb_mpeg2_352x288_1mbps_60fps.m2v", "bbb_mpeg2_352x288_1mbps_60fps.info", ""},
+        {"3gpp", "bbb_h263_352x288_300kbps_12fps.h263", "bbb_h263_352x288_300kbps_12fps.info", ""},
+        {"mp4v-es", "bbb_mpeg4_352x288_512kbps_30fps.m4v", "bbb_mpeg4_352x288_512kbps_30fps.info",
+         ""},
+        {"vp8", "bbb_vp8_176x144_240kbps_60fps.vp8", "bbb_vp8_176x144_240kbps_60fps.info", ""},
+        {"vp8", "bbb_vp8_640x360_2mbps_30fps.vp8", "bbb_vp8_640x360_2mbps_30fps.info",
+         "bbb_vp8_640x360_2mbps_30fps_chksm.md5"},
+        {"vp9", "bbb_vp9_176x144_285kbps_60fps.vp9", "bbb_vp9_176x144_285kbps_60fps.info", ""},
+        {"vp9", "bbb_vp9_640x360_1600kbps_30fps.vp9", "bbb_vp9_640x360_1600kbps_30fps.info",
+         "bbb_vp9_640x360_1600kbps_30fps_chksm.md5"},
+        {"vp9", "bbb_vp9_704x480_280kbps_24fps_altref_2.vp9",
+         "bbb_vp9_704x480_280kbps_24fps_altref_2.info", ""},
+        {"av01", "bbb_av1_640_360.av1", "bbb_av1_640_360.info", "bbb_av1_640_360_chksum.md5"},
+        {"av01", "bbb_av1_176_144.av1", "bbb_av1_176_144.info", "bbb_av1_176_144_chksm.md5"},
 };
 
 class LinearBuffer : public C2Buffer {
@@ -251,8 +239,7 @@
                 if (!codecConfig && !work->worklets.front()->output.buffers.empty()) {
                     if (mReorderDepth < 0) {
                         C2PortReorderBufferDepthTuning::output reorderBufferDepth;
-                        mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK,
-                                          nullptr);
+                        mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK, nullptr);
                         mReorderDepth = reorderBufferDepth.value;
                         if (mReorderDepth > 0) {
                             // TODO: Add validation for reordered output
@@ -333,9 +320,8 @@
     }
 };
 
-class Codec2VideoDecHidlTest
-    : public Codec2VideoDecHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2VideoDecHidlTest : public Codec2VideoDecHidlTestBase,
+                               public ::testing::WithParamInterface<TestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -541,10 +527,8 @@
     return false;
 }
 
-class Codec2VideoDecDecodeTest
-    : public Codec2VideoDecHidlTestBase,
-      public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2VideoDecDecodeTest : public Codec2VideoDecHidlTestBase,
+                                 public ::testing::WithParamInterface<DecodeTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -556,8 +540,8 @@
     description("Decodes input file");
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
 
-    uint32_t streamIndex = std::stoi(std::get<2>(GetParam()));
-    bool signalEOS = !std::get<2>(GetParam()).compare("true");
+    uint32_t streamIndex = std::get<2>(GetParam());
+    bool signalEOS = std::get<3>(GetParam());
     mTimestampDevTest = true;
 
     char mURL[512], info[512], chksum[512];
@@ -657,8 +641,8 @@
     description("Adaptive Decode Test");
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     if (!(strcasestr(mMime.c_str(), "avc") || strcasestr(mMime.c_str(), "hevc") ||
-        strcasestr(mMime.c_str(), "vp8") || strcasestr(mMime.c_str(), "vp9") ||
-        strcasestr(mMime.c_str(), "mpeg2"))) {
+          strcasestr(mMime.c_str(), "vp8") || strcasestr(mMime.c_str(), "vp9") ||
+          strcasestr(mMime.c_str(), "mpeg2"))) {
         return;
     }
 
@@ -987,9 +971,8 @@
     }
 }
 
-class Codec2VideoDecCsdInputTests
-    : public Codec2VideoDecHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {
+class Codec2VideoDecCsdInputTests : public Codec2VideoDecHidlTestBase,
+                                    public ::testing::WithParamInterface<CsdFlushTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -1022,7 +1005,7 @@
     bool flushedDecoder = false;
     bool signalEOS = false;
     bool keyFrame = false;
-    bool flushCsd = !std::get<2>(GetParam()).compare("true");
+    bool flushCsd = std::get<2>(GetParam());
 
     ALOGV("sending %d csd data ", numCsds);
     int framesToDecode = numCsds;
@@ -1092,16 +1075,16 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2VideoDecHidlTest, testing::ValuesIn(kTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 // DecodeTest with StreamIndex and EOS / No EOS
 INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2VideoDecDecodeTest,
                          testing::ValuesIn(kDecodeTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 INSTANTIATE_TEST_SUITE_P(CsdInputs, Codec2VideoDecCsdInputTests,
                          testing::ValuesIn(kCsdFlushTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 }  // anonymous namespace
 
@@ -1111,22 +1094,22 @@
     kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_DECODER);
     for (auto params : kTestParameters) {
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 1, false));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 1, true));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "2", "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 2, false));
         kDecodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "2", "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 2, true));
 
         kCsdFlushTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "true"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), true));
         kCsdFlushTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "false"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), false));
     }
 
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 7e35de7..23ceff4 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -41,10 +41,11 @@
         : C2Buffer({block->share(C2Rect(block->width(), block->height()), ::C2Fence())}) {}
 };
 
-static std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string>>
-        kEncodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
-        kEncodeResolutionTestParameters;
+using EncodeTestParameters = std::tuple<std::string, std::string, bool, bool, bool>;
+static std::vector<EncodeTestParameters> kEncodeTestParameters;
+
+using EncodeResolutionTestParameters = std::tuple<std::string, std::string, int32_t, int32_t>;
+static std::vector<EncodeResolutionTestParameters> kEncodeResolutionTestParameters;
 
 namespace {
 
@@ -75,26 +76,13 @@
         mGraphicPool = std::make_shared<C2PooledBlockPool>(mGraphicAllocator, mBlockPoolId++);
         ASSERT_NE(mGraphicPool, nullptr);
 
-        mCompName = unknown_comp;
-        struct StringToName {
-            const char* Name;
-            standardComp CompName;
-        };
+        std::vector<std::unique_ptr<C2Param>> queried;
+        mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
+                          &queried);
+        ASSERT_GT(queried.size(), 0);
 
-        const StringToName kStringToName[] = {
-                {"h263", h263}, {"avc", avc}, {"mpeg4", mpeg4},
-                {"hevc", hevc}, {"vp8", vp8}, {"vp9", vp9},
-        };
-
-        const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
-
-        // Find the component type
-        for (size_t i = 0; i < kNumStringToName; ++i) {
-            if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
-                mCompName = kStringToName[i].CompName;
-                break;
-            }
-        }
+        mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
+        std::cout << "mime : " << mMime << "\n";
         mEos = false;
         mCsd = false;
         mConfigBPictures = false;
@@ -103,7 +91,6 @@
         mTimestampUs = 0u;
         mOutputSize = 0u;
         mTimestampDevTest = false;
-        if (mCompName == unknown_comp) mDisableTest = true;
 
         C2SecureModeTuning secureModeTuning{};
         mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
@@ -185,16 +172,7 @@
         }
     }
 
-    enum standardComp {
-        h263,
-        avc,
-        mpeg4,
-        hevc,
-        vp8,
-        vp9,
-        unknown_comp,
-    };
-
+    std::string mMime;
     std::string mInstanceName;
     std::string mComponentName;
     bool mEos;
@@ -202,7 +180,6 @@
     bool mDisableTest;
     bool mConfigBPictures;
     bool mTimestampDevTest;
-    standardComp mCompName;
     uint32_t mFramesReceived;
     uint32_t mFailedWorkReceived;
     uint64_t mTimestampUs;
@@ -229,9 +206,8 @@
     }
 };
 
-class Codec2VideoEncHidlTest
-    : public Codec2VideoEncHidlTestBase,
-      public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2VideoEncHidlTest : public Codec2VideoEncHidlTestBase,
+                               public ::testing::WithParamInterface<TestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -239,7 +215,7 @@
 };
 
 void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
-                       Codec2VideoEncHidlTest::standardComp compName, bool& disableTest) {
+                       bool& disableTest) {
     // Validate its a C2 Component
     if (component->getName().find("c2") == std::string::npos) {
         ALOGE("Not a c2 component");
@@ -266,13 +242,6 @@
             return;
         }
     }
-
-    // Validates component name
-    if (compName == Codec2VideoEncHidlTest::unknown_comp) {
-        ALOGE("Component InValid");
-        disableTest = true;
-        return;
-    }
     ALOGV("Component Valid");
 }
 
@@ -403,14 +372,12 @@
 TEST_P(Codec2VideoEncHidlTest, validateCompName) {
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     ALOGV("Checks if the given component is a valid video component");
-    validateComponent(mComponent, mCompName, mDisableTest);
+    validateComponent(mComponent, mDisableTest);
     ASSERT_EQ(mDisableTest, false);
 }
 
-class Codec2VideoEncEncodeTest
-    : public Codec2VideoEncHidlTestBase,
-      public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string, std::string>> {
+class Codec2VideoEncEncodeTest : public Codec2VideoEncHidlTestBase,
+                                 public ::testing::WithParamInterface<EncodeTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -424,10 +391,10 @@
     char mURL[512];
     int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
     int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
-    bool signalEOS = !std::get<2>(GetParam()).compare("true");
+    bool signalEOS = std::get<2>(GetParam());
     // Send an empty frame to receive CSD data from encoder.
-    bool sendEmptyFirstFrame = !std::get<3>(GetParam()).compare("true");
-    mConfigBPictures = !std::get<4>(GetParam()).compare("true");
+    bool sendEmptyFirstFrame = std::get<3>(GetParam());
+    mConfigBPictures = std::get<4>(GetParam());
 
     strcpy(mURL, sResourceDir.c_str());
     GetURLForComponent(mURL);
@@ -515,9 +482,9 @@
         ASSERT_TRUE(false);
     }
 
-    if (mCompName == vp8 || mCompName == h263) {
+    if ((mMime.find("vp8") != std::string::npos) || (mMime.find("3gpp") != std::string::npos)) {
         ASSERT_FALSE(mCsd) << "CSD Buffer not expected";
-    } else if (mCompName != vp9) {
+    } else if (mMime.find("vp9") == std::string::npos) {
         ASSERT_TRUE(mCsd) << "CSD Buffer not received";
     }
 
@@ -695,8 +662,7 @@
 
 class Codec2VideoEncResolutionTest
     : public Codec2VideoEncHidlTestBase,
-      public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string>> {
+      public ::testing::WithParamInterface<EncodeResolutionTestParameters> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -708,8 +674,8 @@
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
 
     std::ifstream eleStream;
-    int32_t nWidth = std::stoi(std::get<2>(GetParam()));
-    int32_t nHeight = std::stoi(std::get<3>(GetParam()));
+    int32_t nWidth = std::get<2>(GetParam());
+    int32_t nHeight = std::get<3>(GetParam());
     ALOGD("Trying encode for width %d height %d", nWidth, nHeight);
     mEos = false;
 
@@ -741,14 +707,16 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2VideoEncHidlTest, testing::ValuesIn(kTestParameters),
-                         android::hardware::PrintInstanceTupleNameToString<>);
+                         PrintInstanceTupleNameToString<>);
 
 INSTANTIATE_TEST_SUITE_P(NonStdSizes, Codec2VideoEncResolutionTest,
-                         ::testing::ValuesIn(kEncodeResolutionTestParameters));
+                         ::testing::ValuesIn(kEncodeResolutionTestParameters),
+                         PrintInstanceTupleNameToString<>);
 
 // EncodeTest with EOS / No EOS
 INSTANTIATE_TEST_SUITE_P(EncodeTestwithEOS, Codec2VideoEncEncodeTest,
-                         ::testing::ValuesIn(kEncodeTestParameters));
+                         ::testing::ValuesIn(kEncodeTestParameters),
+                         PrintInstanceTupleNameToString<>);
 
 TEST_P(Codec2VideoEncHidlTest, AdaptiveBitrateTest) {
     description("Encodes input file for different bitrates");
@@ -842,27 +810,23 @@
     parseArgs(argc, argv);
     kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER);
     for (auto params : kTestParameters) {
-        constexpr char const* kBoolString[] = { "false", "true" };
         for (size_t i = 0; i < 1 << 3; ++i) {
             kEncodeTestParameters.push_back(std::make_tuple(
-                    std::get<0>(params), std::get<1>(params),
-                    kBoolString[i & 1],
-                    kBoolString[(i >> 1) & 1],
-                    kBoolString[(i >> 2) & 1]));
+                    std::get<0>(params), std::get<1>(params), i & 1, (i >> 1) & 1, (i >> 2) & 1));
         }
 
         kEncodeResolutionTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "52", "18"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 52, 18));
         kEncodeResolutionTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "365", "365"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 365, 365));
         kEncodeResolutionTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "484", "362"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 484, 362));
         kEncodeResolutionTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "244", "488"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 244, 488));
         kEncodeResolutionTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "852", "608"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 852, 608));
         kEncodeResolutionTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "1400", "442"));
+                std::make_tuple(std::get<0>(params), std::get<1>(params), 1400, 442));
     }
 
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 0296004..d49141c 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -1503,8 +1503,7 @@
         bqId = 0;
         mOutputBufferQueue->configure(nullIgbp, generation, 0, nullptr);
     } else {
-        mOutputBufferQueue->configure(surface, generation, bqId,
-                                      mBase1_2 ? &syncObj : nullptr);
+        mOutputBufferQueue->configure(surface, generation, bqId, nullptr);
     }
     ALOGD("surface generation remote change %u HAL ver: %s",
           generation, syncObj ? "1.2" : "1.0");
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index 0b38bc1..bed8aeb 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -19,7 +19,6 @@
 #include <android-base/logging.h>
 
 #include <set>
-#include <sstream>
 
 #include <dlfcn.h>
 
@@ -383,6 +382,9 @@
         // Configure the next interface with the params.
         std::vector<C2Param *> configParams;
         for (size_t i = 0; i < heapParams.size(); ++i) {
+            if (!heapParams[i]) {
+                continue;
+            }
             if (heapParams[i]->forStream()) {
                 heapParams[i] = C2Param::CopyAsStream(
                         *heapParams[i], false /* output */, heapParams[i]->stream());
@@ -782,10 +784,7 @@
         if (C2_OK != mStore->createComponent(filter.traits.name, &comp)) {
             return {};
         }
-        if (C2_OK != mStore->createInterface(filter.traits.name, &intf)) {
-            return {};
-        }
-        filters.push_back({comp, intf, filter.traits, filter.desc});
+        filters.push_back({comp, comp->intf(), filter.traits, filter.desc});
     }
     return filters;
 }
@@ -869,7 +868,7 @@
     }
     std::vector<Component> filters = createFilters();
     std::shared_ptr wrapped = std::make_shared<WrappedDecoder>(
-            comp, std::move(filters), weak_from_this());
+            comp, std::vector(filters), weak_from_this());
     {
         std::unique_lock lock(mWrappedComponentsMutex);
         std::vector<std::weak_ptr<const C2Component>> &components =
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index ad28545..73217b0 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -362,7 +362,10 @@
         .limitTo(D::OUTPUT & D::READ));
 
     add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
-        .limitTo(D::ENCODER & D::OUTPUT));
+        .limitTo(D::ENCODER & D::CODED));
+    // Some audio decoders require bitrate information to be set
+    add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
+        .limitTo(D::AUDIO & D::DECODER & D::CODED));
     // we also need to put the bitrate in the max bitrate field
     add(ConfigMapper(KEY_MAX_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
         .limitTo(D::ENCODER & D::READ & D::OUTPUT));
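
A minimal sketch of how this mapping is reached from the NDK side, assuming an AAC decoder and made-up parameter values; the helper name configureAacDecoder is hypothetical and not part of this patch:

    // Hypothetical illustration: KEY_BIT_RATE ("bitrate") set on the configure
    // format is now mapped to C2_PARAMKEY_BITRATE for audio decoders as well.
    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaFormat.h>

    static void configureAacDecoder(AMediaCodec *codec) {
        AMediaFormat *fmt = AMediaFormat_new();
        AMediaFormat_setString(fmt, AMEDIAFORMAT_KEY_MIME, "audio/mp4a-latm");
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_SAMPLE_RATE, 44100);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_CHANNEL_COUNT, 2);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_BIT_RATE, 128000);  // forwarded to the decoder
        AMediaCodec_configure(codec, fmt, nullptr /* surface */, nullptr /* crypto */, 0 /* flags */);
        AMediaFormat_delete(fmt);
    }
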
@@ -1174,11 +1177,14 @@
 
     bool changed = false;
     if (domain & mInputDomain) {
-        sp<AMessage> oldFormat = mInputFormat->dup();
+        sp<AMessage> oldFormat = mInputFormat;
+        mInputFormat = mInputFormat->dup(); // trigger format changed
         mInputFormat->extend(getFormatForDomain(reflected, mInputDomain));
         if (mInputFormat->countEntries() != oldFormat->countEntries()
                 || mInputFormat->changesFrom(oldFormat)->countEntries() > 0) {
             changed = true;
+        } else {
+            mInputFormat = oldFormat; // no change
         }
     }
     if (domain & mOutputDomain) {
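
The dup() above swaps in a new AMessage object only when the contents actually changed, which allows downstream code to detect a change cheaply by object identity. A hedged sketch of that pattern (hypothetical caller, not part of this patch):

    // Hypothetical sketch: a cached sp<AMessage> can be compared by identity,
    // since the input format object is replaced only when it changes.
    #include <media/stagefright/foundation/AMessage.h>

    using android::sp;
    using android::AMessage;

    struct FormatWatcher {
        sp<AMessage> last;
        bool changed(const sp<AMessage> &current) {
            if (current.get() == last.get()) {
                return false;  // same object, no change was signalled
            }
            last = current;
            return true;
        }
    };
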
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
index 9cec23f..af054c7 100644
--- a/media/codec2/sfplugin/FrameReassembler.cpp
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -143,6 +143,7 @@
 
     if (buffer->size() > 0) {
         mCurrentOrdinal.timestamp = timeUs;
+        mCurrentOrdinal.customOrdinal = timeUs;
     }
 
     size_t frameSizeBytes = mFrameSize.value() * mChannelCount * bytesPerSample();
@@ -219,6 +220,7 @@
 
     ++mCurrentOrdinal.frameIndex;
     mCurrentOrdinal.timestamp += mFrameSize.value() * 1000000 / mSampleRate;
+    mCurrentOrdinal.customOrdinal = mCurrentOrdinal.timestamp;
     mCurrentBlock.reset();
     mWriteView.reset();
 }
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index 74e7ef1..2f4d6b1 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -33,11 +33,13 @@
         "libcodec2_vndk",
         "libcutils",
         "liblog",
+        "libnativewindow",
         "libstagefright_foundation",
         "libutils",
     ],
 
     static_libs: [
+        "libarect",
         "libyuv_static",
     ],
 
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index a54af83..a78d811 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -23,6 +23,7 @@
 #include <list>
 #include <mutex>
 
+#include <android/hardware_buffer.h>
 #include <media/hardware/HardwareAPI.h>
 #include <media/stagefright/foundation/AUtils.h>
 
@@ -136,31 +137,56 @@
     int width = view.crop().width;
     int height = view.crop().height;
 
-    if ((IsNV12(view) && IsI420(img)) || (IsI420(view) && IsNV12(img))) {
-        // Take shortcuts to use libyuv functions between NV12 and I420 conversion.
-        if (IsNV12(view) && IsI420(img)) {
+    if (IsNV12(view)) {
+        if (IsNV12(img)) {
+            libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+            libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
+            return OK;
+        } else if (IsNV21(img)) {
+            if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_u, src_stride_u,
+                                    dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+                return OK;
+            }
+        } else if (IsI420(img)) {
             if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
                                     dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
                 return OK;
             }
-        } else {
+        }
+    } else if (IsNV21(view)) {
+        if (IsNV12(img)) {
+            if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_v, src_stride_v,
+                                    dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
+                return OK;
+            }
+        } else if (IsNV21(img)) {
+            libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+            libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height / 2);
+            return OK;
+        } else if (IsI420(img)) {
+            if (!libyuv::NV21ToI420(src_y, src_stride_y, src_v, src_stride_v, dst_y, dst_stride_y,
+                                    dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
+                return OK;
+            }
+        }
+    } else if (IsI420(view)) {
+        if (IsNV12(img)) {
             if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
                                     dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
                 return OK;
             }
+        } else if (IsNV21(img)) {
+            if (!libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+                                    dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+                return OK;
+            }
+        } else if (IsI420(img)) {
+            libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+            libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
+            libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
+            return OK;
         }
     }
-    if (IsNV12(view) && IsNV12(img)) {
-        libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
-        libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
-        return OK;
-    }
-    if (IsI420(view) && IsI420(img)) {
-        libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
-        libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
-        libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
-        return OK;
-    }
     return _ImageCopy<true>(view, img, imgBase);
 }
 
@@ -182,33 +208,56 @@
     int32_t dst_stride_v = view.layout().planes[2].rowInc;
     int width = view.crop().width;
     int height = view.crop().height;
-    if ((IsNV12(img) && IsI420(view)) || (IsI420(img) && IsNV12(view))) {
-        // Take shortcuts to use libyuv functions between NV12 and I420 conversion.
-        if (IsNV12(img) && IsI420(view)) {
+    if (IsNV12(img)) {
+        if (IsNV12(view)) {
+            libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+            libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
+            return OK;
+        } else if (IsNV21(view)) {
+            if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_u, src_stride_u,
+                                    dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+                return OK;
+            }
+        } else if (IsI420(view)) {
             if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
                                     dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
                 return OK;
             }
-        } else {
+        }
+    } else if (IsNV21(img)) {
+        if (IsNV12(view)) {
+            if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_v, src_stride_v,
+                                    dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
+                return OK;
+            }
+        } else if (IsNV21(view)) {
+            libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+            libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height / 2);
+            return OK;
+        } else if (IsI420(view)) {
+            if (!libyuv::NV21ToI420(src_y, src_stride_y, src_v, src_stride_v, dst_y, dst_stride_y,
+                                    dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
+                return OK;
+            }
+        }
+    } else if (IsI420(img)) {
+        if (IsNV12(view)) {
             if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
                                     dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
                 return OK;
             }
+        } else if (IsNV21(view)) {
+            if (!libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+                                    dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+                return OK;
+            }
+        } else if (IsI420(view)) {
+            libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+            libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
+            libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
+            return OK;
         }
     }
-    if (IsNV12(img) && IsNV12(view)) {
-        // For NV12, copy Y and UV plane
-        libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
-        libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
-        return OK;
-    }
-    if (IsI420(img) && IsI420(view)) {
-        // For I420, copy Y, U and V plane.
-        libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
-        libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
-        libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
-        return OK;
-    }
     return _ImageCopy<false>(view, img, imgBase);
 }
 
@@ -250,6 +299,20 @@
             && layout.planes[layout.PLANE_V].offset == 1);
 }
 
+bool IsNV21(const C2GraphicView &view) {
+    if (!IsYUV420(view)) {
+        return false;
+    }
+    const C2PlanarLayout &layout = view.layout();
+    return (layout.rootPlanes == 2
+            && layout.planes[layout.PLANE_U].colInc == 2
+            && layout.planes[layout.PLANE_U].rootIx == layout.PLANE_V
+            && layout.planes[layout.PLANE_U].offset == 1
+            && layout.planes[layout.PLANE_V].colInc == 2
+            && layout.planes[layout.PLANE_V].rootIx == layout.PLANE_V
+            && layout.planes[layout.PLANE_V].offset == 0);
+}
+
 bool IsI420(const C2GraphicView &view) {
     if (!IsYUV420(view)) {
         return false;
@@ -286,6 +349,15 @@
             && (img->mPlane[2].mOffset - img->mPlane[1].mOffset == 1));
 }
 
+bool IsNV21(const MediaImage2 *img) {
+    if (!IsYUV420(img)) {
+        return false;
+    }
+    return (img->mPlane[1].mColInc == 2
+            && img->mPlane[2].mColInc == 2
+            && (img->mPlane[1].mOffset - img->mPlane[2].mOffset == 1));
+}
+
 bool IsI420(const MediaImage2 *img) {
     if (!IsYUV420(img)) {
         return false;
@@ -295,6 +367,76 @@
             && img->mPlane[2].mOffset > img->mPlane[1].mOffset);
 }
 
+FlexLayout GetYuv420FlexibleLayout() {
+    static FlexLayout sLayout = []{
+        AHardwareBuffer_Desc desc = {
+            16,  // width
+            16,  // height
+            1,   // layers
+            AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420,
+            AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+            0,   // stride
+            0,   // rfu0
+            0,   // rfu1
+        };
+        AHardwareBuffer *buffer = nullptr;
+        int ret = AHardwareBuffer_allocate(&desc, &buffer);
+        if (ret != 0) {
+            return FLEX_LAYOUT_UNKNOWN;
+        }
+        class AutoCloser {
+        public:
+            AutoCloser(AHardwareBuffer *buffer) : mBuffer(buffer), mLocked(false) {}
+            ~AutoCloser() {
+                if (mLocked) {
+                    AHardwareBuffer_unlock(mBuffer, nullptr);
+                }
+                AHardwareBuffer_release(mBuffer);
+            }
+
+            void setLocked() { mLocked = true; }
+
+        private:
+            AHardwareBuffer *mBuffer;
+            bool mLocked;
+        } autoCloser(buffer);
+        AHardwareBuffer_Planes planes;
+        ret = AHardwareBuffer_lockPlanes(
+                buffer,
+                AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+                -1,       // fence
+                nullptr,  // rect
+                &planes);
+        if (ret != 0) {
+            AHardwareBuffer_release(buffer);
+            return FLEX_LAYOUT_UNKNOWN;
+        }
+        autoCloser.setLocked();
+        if (planes.planeCount != 3) {
+            return FLEX_LAYOUT_UNKNOWN;
+        }
+        if (planes.planes[0].pixelStride != 1) {
+            return FLEX_LAYOUT_UNKNOWN;
+        }
+        if (planes.planes[1].pixelStride == 1 && planes.planes[2].pixelStride == 1) {
+            return FLEX_LAYOUT_PLANAR;
+        }
+        if (planes.planes[1].pixelStride == 2 && planes.planes[2].pixelStride == 2) {
+            ssize_t uvDist =
+                static_cast<uint8_t *>(planes.planes[2].data) -
+                static_cast<uint8_t *>(planes.planes[1].data);
+            if (uvDist == 1) {
+                return FLEX_LAYOUT_SEMIPLANAR_UV;
+            } else if (uvDist == -1) {
+                return FLEX_LAYOUT_SEMIPLANAR_VU;
+            }
+            return FLEX_LAYOUT_UNKNOWN;
+        }
+        return FLEX_LAYOUT_UNKNOWN;
+    }();
+    return sLayout;
+}
+
 MediaImage2 CreateYUV420PlanarMediaImage2(
         uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride) {
     return MediaImage2 {
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index afadf00..af29e81 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -96,6 +96,11 @@
 bool IsNV12(const C2GraphicView &view);
 
 /**
+ * Returns true iff a view has a NV21 layout.
+ */
+bool IsNV21(const C2GraphicView &view);
+
+/**
  * Returns true iff a view has a I420 layout.
  */
 bool IsI420(const C2GraphicView &view);
@@ -111,10 +116,26 @@
 bool IsNV12(const MediaImage2 *img);
 
 /**
+ * Returns true iff a MediaImage2 has a NV21 layout.
+ */
+bool IsNV21(const MediaImage2 *img);
+
+/**
  * Returns true iff a MediaImage2 has a I420 layout.
  */
 bool IsI420(const MediaImage2 *img);
 
+enum FlexLayout {
+    FLEX_LAYOUT_UNKNOWN,
+    FLEX_LAYOUT_PLANAR,
+    FLEX_LAYOUT_SEMIPLANAR_UV,
+    FLEX_LAYOUT_SEMIPLANAR_VU,
+};
+/**
+ * Returns the layout of the YCBCR_420_888 pixel format.
+ */
+FlexLayout GetYuv420FlexibleLayout();
+
 /**
  * A raw memory block to use for internal buffers.
  *
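A short sketch of how a caller might branch on the probed layout (hypothetical helper; include path and android namespace assumed):

    // Hypothetical caller: pick a description (or a copy strategy) based on how
    // this device lays out flexible YCBCR_420_888 buffers.
    #include <Codec2BufferUtils.h>  // include path assumed

    static const char *describeFlexLayout() {
        switch (android::GetYuv420FlexibleLayout()) {
            case android::FLEX_LAYOUT_PLANAR:        return "planar (I420-like)";
            case android::FLEX_LAYOUT_SEMIPLANAR_UV: return "semiplanar UV (NV12-like)";
            case android::FLEX_LAYOUT_SEMIPLANAR_VU: return "semiplanar VU (NV21-like)";
            default:                                 return "unknown";
        }
    }
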
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 85623b8..a8528df 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -30,10 +30,15 @@
 #include <C2ErrnoUtils.h>
 #include <C2HandleIonInternal.h>
 
+#include <android-base/properties.h>
+
 namespace android {
 
 namespace {
     constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+
+    // max padding after ion/dmabuf allocations in bytes
+    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
 }
 
 /* size_t <=> int(lo), int(hi) conversions */
@@ -376,14 +381,34 @@
         unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
     int bufferFd = -1;
     ion_user_handle_t buffer = -1;
-    size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
+    // NOTE: read this value directly from the system property, as this code has to run on
+    // Android Q, but the sysprop API was only introduced in Android S.
+    static size_t sPadding =
+        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
+    if (sPadding > SIZE_MAX - size) {
+        ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
+        // use ImplV2 as there is no allocation anyways
+        return new ImplV2(ionFd, size, -1, id, -ENOMEM);
+    }
+
+    size_t allocSize = size + sPadding;
+    if (align) {
+        if (align - 1 > SIZE_MAX - allocSize) {
+            ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
+                  size, sPadding, align);
+            // use ImplV2 as there is no allocation anyways
+            return new ImplV2(ionFd, size, -1, id, -ENOMEM);
+        }
+        allocSize += align - 1;
+        allocSize &= ~(align - 1);
+    }
     int ret;
 
     if (ion_is_legacy(ionFd)) {
-        ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
+        ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
         ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
               "returned (%d) ; buffer = %d",
-              ionFd, alignedSize, align, heapMask, flags, ret, buffer);
+              ionFd, allocSize, align, heapMask, flags, ret, buffer);
         if (ret == 0) {
             // get buffer fd for native handle constructor
             ret = ion_share(ionFd, buffer, &bufferFd);
@@ -392,15 +417,15 @@
                 buffer = -1;
             }
         }
-        return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
+        return new Impl(ionFd, allocSize, bufferFd, buffer, id, ret);
 
     } else {
-        ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
+        ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
         ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
               "returned (%d) ; bufferFd = %d",
-              ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);
+              ionFd, allocSize, align, heapMask, flags, ret, bufferFd);
 
-        return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
+        return new ImplV2(ionFd, allocSize, bufferFd, id, ret);
     }
 }
 
diff --git a/media/codec2/vndk/C2DmaBufAllocator.cpp b/media/codec2/vndk/C2DmaBufAllocator.cpp
index 750aa31..6d8552a 100644
--- a/media/codec2/vndk/C2DmaBufAllocator.cpp
+++ b/media/codec2/vndk/C2DmaBufAllocator.cpp
@@ -16,11 +16,13 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "C2DmaBufAllocator"
+
 #include <BufferAllocator/BufferAllocator.h>
 #include <C2Buffer.h>
 #include <C2Debug.h>
 #include <C2DmaBufAllocator.h>
 #include <C2ErrnoUtils.h>
+
 #include <linux/ion.h>
 #include <sys/mman.h>
 #include <unistd.h>  // getpagesize, size_t, close, dup
@@ -28,14 +30,15 @@
 
 #include <list>
 
-#ifdef __ANDROID_APEX__
 #include <android-base/properties.h>
-#endif
 
 namespace android {
 
 namespace {
-constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+
+    // max padding after ion/dmabuf allocations in bytes
+    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
 }
 
 /* =========================== BUFFER HANDLE =========================== */
@@ -250,8 +253,11 @@
     int ret = 0;
 
     bufferFd = alloc.Alloc(heap_name, size, flags);
-    if (bufferFd < 0) ret = bufferFd;
+    if (bufferFd < 0) {
+        ret = bufferFd;
+    }
 
+    // this may be a non-working handle if bufferFd is negative
     mHandle = C2HandleBuf(bufferFd, size);
     mId = id;
     mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
@@ -360,8 +366,22 @@
         return ret;
     }
 
+    // TODO: should we pad before mapping usage?
+
+    // NOTE: read this value directly from the system property, as this code has to run on
+    // Android Q, but the sysprop API was only introduced in Android S.
+    static size_t sPadding =
+        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
+    if (sPadding > SIZE_MAX - capacity) {
+        // size would overflow
+        ALOGD("dmabuf_alloc: size #%x cannot accommodate padding #%zx", capacity, sPadding);
+        return C2_NO_MEMORY;
+    }
+
+    size_t allocSize = (size_t)capacity + sPadding;
+    // TODO: should we align allocation size to mBlockSize to reflect the true allocation size?
     std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
-            mBufferAllocator, capacity, heap_name, flags, getId());
+            mBufferAllocator, allocSize, heap_name, flags, getId());
     ret = alloc->status();
     if (ret == C2_OK) {
         *allocation = alloc;
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 314a822..b1d72e8 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2345,7 +2345,7 @@
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
 
-            AMediaFormat_setBuffer(mLastTrack->meta, 
+            AMediaFormat_setBuffer(mLastTrack->meta,
                     AMEDIAFORMAT_KEY_ESDS, &buffer[4], chunk_data_size - 4);
 
             if (mPath.size() >= 2
@@ -2427,7 +2427,7 @@
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
 
-            AMediaFormat_setBuffer(mLastTrack->meta, 
+            AMediaFormat_setBuffer(mLastTrack->meta,
                     AMEDIAFORMAT_KEY_CSD_AVC, buffer.get(), chunk_data_size);
 
             break;
@@ -2449,7 +2449,7 @@
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
 
-            AMediaFormat_setBuffer(mLastTrack->meta, 
+            AMediaFormat_setBuffer(mLastTrack->meta,
                     AMEDIAFORMAT_KEY_CSD_HEVC, buffer.get(), chunk_data_size);
 
             *offset += chunk_size;
@@ -4021,13 +4021,13 @@
                 // custom genre string
                 buffer[size] = '\0';
 
-                AMediaFormat_setString(mFileMetaData, 
+                AMediaFormat_setString(mFileMetaData,
                         metadataKey, (const char *)buffer + 8);
             }
         } else {
             buffer[size] = '\0';
 
-            AMediaFormat_setString(mFileMetaData, 
+            AMediaFormat_setString(mFileMetaData,
                     metadataKey, (const char *)buffer + 8);
         }
     }
@@ -4568,6 +4568,9 @@
 
     if (objectTypeIndication == 0x6B || objectTypeIndication == 0x69) {
         // mp3 audio
+        if (mLastTrack == NULL)
+            return ERROR_MALFORMED;
+
         AMediaFormat_setString(mLastTrack->meta,AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
         return OK;
     }
@@ -4658,6 +4661,10 @@
         if (offset >= csd_size || csd[offset] != 0x01) {
             return ERROR_MALFORMED;
         }
+
+        if (mLastTrack == NULL) {
+            return ERROR_MALFORMED;
+        }
         // formerly kKeyVorbisInfo
         AMediaFormat_setBuffer(mLastTrack->meta,
                 AMEDIAFORMAT_KEY_CSD_0, &csd[offset], len1);
@@ -6187,9 +6194,13 @@
         if (newBuffer) {
             if (mIsPcm) {
                 // The twos' PCM block reader assumes that all samples have the same size.
-
-                uint32_t samplesToRead = mSampleTable->getLastSampleIndexInChunk()
-                                                      - mCurrentSampleIndex + 1;
+                uint32_t lastSampleIndexInChunk = mSampleTable->getLastSampleIndexInChunk();
+                if (lastSampleIndexInChunk < mCurrentSampleIndex) {
+                    mBuffer->release();
+                    mBuffer = nullptr;
+                    return AMEDIA_ERROR_UNKNOWN;
+                }
+                uint32_t samplesToRead = lastSampleIndexInChunk - mCurrentSampleIndex + 1;
                 if (samplesToRead > kMaxPcmFrameSize) {
                     samplesToRead = kMaxPcmFrameSize;
                 }
@@ -6198,13 +6209,17 @@
                       samplesToRead, size, mCurrentSampleIndex,
                       mSampleTable->getLastSampleIndexInChunk());
 
-               size_t totalSize = samplesToRead * size;
+                size_t totalSize = samplesToRead * size;
+                if (mBuffer->size() < totalSize) {
+                    mBuffer->release();
+                    mBuffer = nullptr;
+                    return AMEDIA_ERROR_UNKNOWN;
+                }
                 uint8_t* buf = (uint8_t *)mBuffer->data();
                 ssize_t bytesRead = mDataSource->readAt(offset, buf, totalSize);
                 if (bytesRead < (ssize_t)totalSize) {
                     mBuffer->release();
                     mBuffer = NULL;
-
                     return AMEDIA_ERROR_IO;
                 }
 
@@ -6258,7 +6273,19 @@
                 if (isSyncSample) {
                     AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
                 }
- 
+
+                AMediaFormat_setInt64(
+                        meta, "sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET*/,
+                        offset);
+
+                if (mSampleTable != nullptr &&
+                        mCurrentSampleIndex == mSampleTable->getLastSampleIndexInChunk()) {
+                    AMediaFormat_setInt64(
+                    meta,
+                    "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+                    mSampleTable->getLastSampleIndexInChunk());
+                }
+
                 ++mCurrentSampleIndex;
             }
         }
@@ -6408,6 +6435,17 @@
             AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
         }
 
+        AMediaFormat_setInt64(
+                meta, "sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET*/, offset);
+
+        if (mSampleTable != nullptr &&
+                mCurrentSampleIndex == mSampleTable->getLastSampleIndexInChunk()) {
+            AMediaFormat_setInt64(
+                    meta,
+                    "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+                    mSampleTable->getLastSampleIndexInChunk());
+        }
+
         ++mCurrentSampleIndex;
 
         *out = mBuffer;
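
A hedged sketch of reading the new per-sample keys from the NDK, assuming they propagate to the sample format returned by AMediaExtractor_getSampleFormat; the helper below is illustrative only:

    // Hypothetical reader of the new per-sample metadata keys.
    #include <stdio.h>
    #include <media/NdkMediaExtractor.h>
    #include <media/NdkMediaFormat.h>

    static void logSampleOffsets(AMediaExtractor *extractor) {
        AMediaFormat *fmt = AMediaFormat_new();
        if (AMediaExtractor_getSampleFormat(extractor, fmt) == AMEDIA_OK) {
            int64_t fileOffset = 0;
            if (AMediaFormat_getInt64(fmt, "sample-file-offset", &fileOffset)) {
                // byte position of the current sample within the container
                printf("sample-file-offset: %lld\n", (long long)fileOffset);
            }
            int64_t lastIndex = 0;
            if (AMediaFormat_getInt64(fmt, "last-sample-index-in-chunk", &lastIndex)) {
                // present only when the current sample is the last one in its chunk
                printf("last-sample-index-in-chunk: %lld\n", (long long)lastIndex);
            }
        }
        AMediaFormat_delete(fmt);
    }
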
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 22cf254..3333925 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -74,8 +74,9 @@
      * The nominal range of the data is [-1.0f, 1.0f).
      * Values outside that range may be clipped.
      *
-     * See also 'floatData' at
-     * https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
+     * See also the float data variant of
+     * <a href="/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)">
+     *   write(float[], int, int, int)</a>.
      */
     AAUDIO_FORMAT_PCM_FLOAT,
 
@@ -196,21 +197,69 @@
 };
 typedef int32_t  aaudio_result_t;
 
+/**
+ * AAudio stream states. For details, refer to
+ * <a href="/ndk/guides/audio/aaudio/aaudio#using-streams">Using an Audio Stream</a>
+ */
 enum
 {
+
+    /**
+     * The stream is created but not initialized yet.
+     */
     AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+    /**
+     * The stream is in an unrecognized state.
+     */
     AAUDIO_STREAM_STATE_UNKNOWN,
+
+    /**
+     * The stream is open and ready to use.
+     */
     AAUDIO_STREAM_STATE_OPEN,
+    /**
+     * The stream is just starting up.
+     */
     AAUDIO_STREAM_STATE_STARTING,
+    /**
+     * The stream has started.
+     */
     AAUDIO_STREAM_STATE_STARTED,
+    /**
+     * The stream is pausing.
+     */
     AAUDIO_STREAM_STATE_PAUSING,
+    /**
+     * The stream has been paused. It can be restarted or flushed.
+     */
     AAUDIO_STREAM_STATE_PAUSED,
+    /**
+     * The stream is being flushed.
+     */
     AAUDIO_STREAM_STATE_FLUSHING,
+    /**
+     * The stream is flushed, ready to be restarted.
+     */
     AAUDIO_STREAM_STATE_FLUSHED,
+    /**
+     * The stream is stopping.
+     */
     AAUDIO_STREAM_STATE_STOPPING,
+    /**
+     * The stream has been stopped.
+     */
     AAUDIO_STREAM_STATE_STOPPED,
+    /**
+     * The stream is closing.
+     */
     AAUDIO_STREAM_STATE_CLOSING,
+    /**
+     * The stream has been closed.
+     */
     AAUDIO_STREAM_STATE_CLOSED,
+    /**
+     * The stream has been disconnected from the audio device.
+     */
     AAUDIO_STREAM_STATE_DISCONNECTED
 };
 typedef int32_t aaudio_stream_state_t;
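
For context, a minimal sketch of how these states are typically observed with AAudioStream_waitForStateChange(); the timeout value is arbitrary and the helper is not part of this patch:

    // Hypothetical usage: start a stream and wait until it leaves the STARTING state.
    #include <aaudio/AAudio.h>

    static aaudio_result_t startAndWait(AAudioStream *stream) {
        aaudio_result_t result = AAudioStream_requestStart(stream);
        if (result != AAUDIO_OK) return result;

        aaudio_stream_state_t nextState = AAUDIO_STREAM_STATE_UNINITIALIZED;
        // Blocks until the state is no longer STARTING, or the timeout expires.
        result = AAudioStream_waitForStateChange(stream, AAUDIO_STREAM_STATE_STARTING,
                                                 &nextState, 1000 * 1000 * 1000LL /* 1 s */);
        // On success, nextState is normally AAUDIO_STREAM_STATE_STARTED.
        return result;
    }
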
@@ -260,7 +309,8 @@
  * This information is used by certain platforms or routing policies
  * to make more refined volume or routing decisions.
  *
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
  * in the Android Java API.
  *
  * Added in API level 28.
@@ -361,7 +411,8 @@
  * an audio book application) this information might be used by the audio framework to
  * enforce audio focus.
  *
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
  * in the Android Java API.
  *
  * Added in API level 28.
@@ -441,7 +492,8 @@
 /**
  * Specifying if audio may or may not be captured by other apps or the system.
  *
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
  * in the Android Java API.
  *
  * Added in API level 29.
@@ -453,10 +505,11 @@
      * For privacy, the following usages can not be recorded: AAUDIO_VOICE_COMMUNICATION*,
      * AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
      *
-     * On {@link android.os.Build.VERSION_CODES#Q}, this means only {@link #AAUDIO_USAGE_MEDIA}
-     * and {@link #AAUDIO_USAGE_GAME} may be captured.
+     * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Build.VERSION_CODES.Q</a>,
+     * this means only {@link #AAUDIO_USAGE_MEDIA} and {@link #AAUDIO_USAGE_GAME} may be captured.
      *
-     * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_ALL}.
+     * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_ALL">
+     * ALLOW_CAPTURE_BY_ALL</a>.
      */
     AAUDIO_ALLOW_CAPTURE_BY_ALL = 1,
     /**
@@ -464,8 +517,9 @@
      *
      * System apps can capture for many purposes like accessibility, user guidance...
      * but have strong restriction. See
-     * {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_SYSTEM} for what the system apps
-     * can do with the capture audio.
+     * <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_SYSTEM">
+     * ALLOW_CAPTURE_BY_SYSTEM</a>
+     * for what the system apps can do with the captured audio.
      */
     AAUDIO_ALLOW_CAPTURE_BY_SYSTEM = 2,
     /**
@@ -473,7 +527,8 @@
      *
      * It is encouraged to use {@link #AAUDIO_ALLOW_CAPTURE_BY_SYSTEM} instead of this value as system apps
      * provide significant and useful features for the user (eg. accessibility).
-     * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_NONE}.
+     * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_NONE">
+     * ALLOW_CAPTURE_BY_NONE</a>.
      */
     AAUDIO_ALLOW_CAPTURE_BY_NONE = 3,
 };
@@ -803,7 +858,9 @@
  * The default is {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}.
  *
  * Note that an application can also set its global policy, in which case the most restrictive
- * policy is always applied. See {@link android.media.AudioAttributes#setAllowedCapturePolicy(int)}
+ * policy is always applied. See
+ * <a href="/reference/android/media/AudioManager#setAllowedCapturePolicy(int)">
+ * setAllowedCapturePolicy(int)</a>
  *
  * Available since API level 29.
  *
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 5d311fc..1bbe443 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -268,7 +268,7 @@
 
         if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
             ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
-            result = systemStopFromCallback();
+            result = systemStopInternal();
             break;
         }
     }
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index b81e5e4..3f17e6b 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -301,7 +301,7 @@
             }
         } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
             ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
-            result = systemStopFromCallback();
+            result = systemStopInternal();
             break;
         }
     }
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 53523c5..e8f71be 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -143,13 +143,13 @@
 }
 
 aaudio_result_t AudioStream::systemStart() {
-    std::lock_guard<std::mutex> lock(mStreamLock);
-
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
+    std::lock_guard<std::mutex> lock(mStreamLock);
+
     switch (getState()) {
         // Is this a good time to start?
         case AAUDIO_STREAM_STATE_OPEN:
@@ -187,7 +187,6 @@
 }
 
 aaudio_result_t AudioStream::systemPause() {
-    std::lock_guard<std::mutex> lock(mStreamLock);
 
     if (!isPauseSupported()) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
@@ -198,6 +197,7 @@
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
+    std::lock_guard<std::mutex> lock(mStreamLock);
     switch (getState()) {
         // Proceed with pausing.
         case AAUDIO_STREAM_STATE_STARTING:
@@ -242,12 +242,12 @@
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
         ALOGE("stream cannot be flushed from a callback!");
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
+    std::lock_guard<std::mutex> lock(mStreamLock);
     aaudio_result_t result = AAudio_isFlushAllowed(getState());
     if (result != AAUDIO_OK) {
         return result;
@@ -256,7 +256,7 @@
     return requestFlush_l();
 }
 
-aaudio_result_t AudioStream::systemStopFromCallback() {
+aaudio_result_t AudioStream::systemStopInternal() {
     std::lock_guard<std::mutex> lock(mStreamLock);
     aaudio_result_t result = safeStop_l();
     if (result == AAUDIO_OK) {
@@ -267,17 +267,12 @@
 }
 
 aaudio_result_t AudioStream::systemStopFromApp() {
-    std::lock_guard<std::mutex> lock(mStreamLock);
+    // This check can and should be done outside the lock.
     if (collidesWithCallback()) {
         ALOGE("stream cannot be stopped by calling from a callback!");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    aaudio_result_t result = safeStop_l();
-    if (result == AAUDIO_OK) {
-        // We only call this for logging in "dumpsys audio". So ignore return code.
-        (void) mPlayerBase->stopWithStatus();
-    }
-    return result;
+    return systemStopInternal();
 }
 
 aaudio_result_t AudioStream::safeStop_l() {
@@ -316,12 +311,12 @@
 }
 
 aaudio_result_t AudioStream::safeRelease() {
-    // This may get temporarily unlocked in the MMAP release() when joining callback threads.
-    std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    // This may get temporarily unlocked in the MMAP release() when joining callback threads.
+    std::lock_guard<std::mutex> lock(mStreamLock);
     if (getState() == AAUDIO_STREAM_STATE_CLOSING) { // already released?
         return AAUDIO_OK;
     }
@@ -329,17 +324,14 @@
 }
 
 aaudio_result_t AudioStream::safeReleaseClose() {
-    // This get temporarily unlocked in the MMAP release() when joining callback threads.
-    std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    releaseCloseFinal_l();
-    return AAUDIO_OK;
+    return safeReleaseCloseInternal();
 }
 
-aaudio_result_t AudioStream::safeReleaseCloseFromCallback() {
+aaudio_result_t AudioStream::safeReleaseCloseInternal() {
     // This gets temporarily unlocked in the MMAP release() when joining callback threads.
     std::lock_guard<std::mutex> lock(mStreamLock);
     releaseCloseFinal_l();
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 333e665..abf62f3 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -408,7 +408,7 @@
     /**
      * This is called internally when an app callback returns AAUDIO_CALLBACK_RESULT_STOP.
      */
-    aaudio_result_t systemStopFromCallback();
+    aaudio_result_t systemStopInternal();
 
     /**
      * Safely RELEASE a stream after taking mStreamLock and checking
@@ -424,7 +424,7 @@
      */
     aaudio_result_t safeReleaseClose();
 
-    aaudio_result_t safeReleaseCloseFromCallback();
+    aaudio_result_t safeReleaseCloseInternal();
 
 protected:
 
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index fdaa2ab..60eb73a 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -124,7 +124,7 @@
                               __func__, callbackResult);
                     }
                     audioBuffer->size = 0;
-                    systemStopFromCallback();
+                    systemStopInternal();
                     // Disable the callback just in case the system keeps trying to call us.
                     mCallbackEnabled.store(false);
                 }
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 62c9b46..f9eebd7 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -250,3 +250,16 @@
         "libutils",
     ],
 }
+
+
+cc_test {
+    name: "test_disconnect_race",
+    defaults: ["libaaudio_tests_defaults"],
+    srcs: ["test_disconnect_race.cpp"],
+    shared_libs: [
+        "libaaudio",
+        "libbinder",
+        "libcutils",
+        "libutils",
+    ],
+}
diff --git a/media/libaaudio/tests/test_disconnect_race.cpp b/media/libaaudio/tests/test_disconnect_race.cpp
new file mode 100644
index 0000000..6dbe165
--- /dev/null
+++ b/media/libaaudio/tests/test_disconnect_race.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test whether an error callback is joined before the close finishes.
+ *
+ * Start a stream with a callback.
+ * The callback just sleeps for a long time.
+ * While the callback is sleeping, close() the stream from the main thread.
+ * Then check to make sure the callback was joined before the close() returns.
+ *
+ * This can hang if there are deadlocks. So make sure you get a PASSED result.
+ */
+
+#include <atomic>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+// Sleep long enough that the foreground has a chance to call close.
+static constexpr int kCallbackSleepMillis = 1000;
+static constexpr int kPollSleepMillis     =  100;
+
+static int sErrorCount = 0;
+
+#define MY_ASSERT_TRUE(statement) \
+    if (!(statement)) { \
+        printf("ERROR line:%d - " #statement "\n", __LINE__); \
+        sErrorCount++; \
+        return false; \
+    }
+
+#define MY_ASSERT_EQ(aa,bb) MY_ASSERT_TRUE(((aa) == (bb)))
+#define MY_ASSERT_NE(aa,bb) MY_ASSERT_TRUE(((aa) != (bb)))
+
+class AudioEngine {
+public:
+
+    // Check for a crash or late callback if we close without stopping.
+    bool checkCloseJoins(aaudio_direction_t direction,
+                             aaudio_performance_mode_t perfMode,
+                             bool callStopFromCallback) {
+        mCallStopFromCallback = callStopFromCallback;
+
+        if (!startStreamForStall(direction, perfMode)) return false;
+
+        printf("--------------------------------------------------------\n");
+        printf("%s() - direction = %d, perfMode = %d, callStop = %d\n",
+            __func__, direction, perfMode, callStopFromCallback);
+
+        // When the callback starts it will go to sleep.
+        if (!waitForCallbackToStart()) return false;
+
+        printf("call AAudioStream_close()\n");
+        MY_ASSERT_TRUE(!mCallbackFinished); // Still sleeping?
+        aaudio_result_t result = AAudioStream_close(mStream); // May hang here!
+        if (mCallbackStarted) {
+            MY_ASSERT_TRUE(mCallbackFinished);
+        }
+        MY_ASSERT_EQ(AAUDIO_OK, result);
+        printf("AAudioStream_close() returned %d\n", result);
+
+        MY_ASSERT_EQ(AAUDIO_ERROR_DISCONNECTED, mError.load());
+        if (mCallStopFromCallback) {
+            // Did calling stop() from callback fail? It should have.
+            MY_ASSERT_NE(AAUDIO_OK, mStopResult.load());
+        }
+
+        return true;
+    }
+
+private:
+    bool startStreamForStall(aaudio_direction_t direction,
+                             aaudio_performance_mode_t perfMode) {
+        AAudioStreamBuilder* builder = nullptr;
+        aaudio_result_t result = AAUDIO_OK;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        result = AAudio_createStreamBuilder(&builder);
+        MY_ASSERT_EQ(AAUDIO_OK, result);
+
+        // Request stream properties.
+        AAudioStreamBuilder_setDirection(builder, direction);
+        AAudioStreamBuilder_setPerformanceMode(builder, perfMode);
+        AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, this);
+        AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, this);
+
+        // Create an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(builder, &mStream);
+        AAudioStreamBuilder_delete(builder);
+        MY_ASSERT_EQ(AAUDIO_OK, result);
+
+        // Check to see what kind of stream we actually got.
+        int32_t deviceId = AAudioStream_getDeviceId(mStream);
+        aaudio_performance_mode_t
+            actualPerfMode = AAudioStream_getPerformanceMode(mStream);
+        printf("-------- opened: deviceId = %3d, perfMode = %d\n",
+               deviceId,
+               actualPerfMode);
+
+        // Start stream.
+        result = AAudioStream_requestStart(mStream);
+        MY_ASSERT_EQ(AAUDIO_OK, result);
+
+        return true;
+    }
+
+    bool waitForCallbackToStart() {
+        // Wait for callback to say it has been called.
+        int countDown = 10 * 1000 / kPollSleepMillis;
+        while (!mCallbackStarted && countDown > 0) {
+            if ((countDown % 5) == 0) {
+                printf("===== Please PLUG or UNPLUG headphones! ======= %d\n", countDown);
+            }
+            usleep(kPollSleepMillis * 1000);
+            countDown--;
+        }
+        MY_ASSERT_TRUE(countDown > 0);
+        MY_ASSERT_TRUE(mCallbackStarted);
+        return true;
+    }
+
+    // Data callback that does no work; this test only exercises the error callback path.
+    static aaudio_data_callback_result_t s_myDataCallbackProc(
+            AAudioStream * /* stream */,
+            void * /* userData */,
+            void * /* audioData */,
+            int32_t /* numFrames */
+    ) {
+        return AAUDIO_CALLBACK_RESULT_CONTINUE;
+    }
+
+    static void s_myErrorCallbackProc(
+                AAudioStream * stream,
+                void *userData,
+                aaudio_result_t error) {
+        AudioEngine *engine = (AudioEngine *)userData;
+        engine->mError = error;
+        engine->mCallbackStarted = true;
+        usleep(kCallbackSleepMillis * 1000);
+        // It is illegal to call stop() from the callback; it should
+        // return an error and not hang.
+        if (engine->mCallStopFromCallback) {
+            engine->mStopResult = AAudioStream_requestStop(stream);
+        }
+        engine->mCallbackFinished = true;
+    }
+
+    AAudioStream* mStream = nullptr;
+
+    std::atomic<aaudio_result_t> mError{AAUDIO_OK}; // written by error callback
+    std::atomic<bool> mCallStopFromCallback{false};
+    std::atomic<bool> mCallbackStarted{false};   // written by error callback
+    std::atomic<bool> mCallbackFinished{false};  // written by error callback
+    std::atomic<aaudio_result_t> mStopResult{AAUDIO_OK};
+};
+
+int main(int, char **) {
+    // Parameters to test.
+    static aaudio_direction_t directions[] = {AAUDIO_DIRECTION_OUTPUT,
+                                              AAUDIO_DIRECTION_INPUT};
+    static aaudio_performance_mode_t perfModes[] =
+        {AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_PERFORMANCE_MODE_NONE};
+    static bool callStops[] = { false, true };
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("Test Disconnect Race V1.0\n");
+    printf("\n");
+
+    for (auto callStop : callStops) {
+        for (auto direction : directions) {
+            for (auto perfMode : perfModes) {
+                AudioEngine engine;
+                engine.checkCloseJoins(direction, perfMode, callStop);
+            }
+        }
+    }
+
+    printf("Error Count = %d, %s\n", sErrorCount,
+           ((sErrorCount == 0) ? "PASS" : "FAIL"));
+}
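
For real applications, the pattern this test exercises matches the NDK guidance: the error callback should only record the disconnect and let a separate thread stop, close, and reopen the stream. A minimal sketch with hypothetical member names (mRestartRequested is an assumed std::atomic<bool>):

    static void errorCallback(AAudioStream* /*stream*/, void* userData, aaudio_result_t error) {
        auto* engine = static_cast<AudioEngine*>(userData);
        if (error == AAUDIO_ERROR_DISCONNECTED) {
            engine->mRestartRequested.store(true);  // only record it; no stop()/close() here
        }
    }

    void AudioEngine::restartIfNeeded() {           // hypothetical; runs on another thread
        if (mRestartRequested.exchange(false)) {
            AAudioStream_requestStop(mStream);      // safe outside the callback
            AAudioStream_close(mStream);
            // ...rebuild with an AAudioStreamBuilder and requestStart() again...
        }
    }
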
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 64a335a..19d68a0 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -348,6 +348,7 @@
         "aidl/android/media/AudioUniqueIdUse.aidl",
         "aidl/android/media/AudioUsage.aidl",
         "aidl/android/media/AudioUuid.aidl",
+        "aidl/android/media/AudioVibratorInfo.aidl",
         "aidl/android/media/EffectDescriptor.aidl",
         "aidl/android/media/ExtraAudioDescriptor.aidl",
     ],
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index f476b7d..0bc592d 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -2258,6 +2258,15 @@
     return NO_ERROR;
 }
 
+status_t AudioSystem::setVibratorInfos(
+        const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->setVibratorInfos(vibratorInfos);
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
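
AudioSystem::setVibratorInfos() simply forwards the list to AudioFlinger. A hedged sketch of how a caller (for example the audio server after querying the vibrator HAL, which is not part of this patch) could populate the new parcelable:

    std::vector<media::AudioVibratorInfo> infos;
    media::AudioVibratorInfo info;
    info.id = 0;                      // vibrator id
    info.resonantFrequency = 150.0f;  // Hz, illustrative value
    info.qFactor = 8.0f;              // illustrative value
    infos.push_back(info);
    status_t status = AudioSystem::setVibratorInfos(infos);
    // PERMISSION_DENIED is returned when AudioFlinger is unavailable (see above).
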
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 4103630..0feafc5 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -733,6 +733,11 @@
     return statusTFromBinderStatus(mDelegate->setAudioHalPids(pidsAidl));
 }
 
+status_t AudioFlingerClientAdapter::setVibratorInfos(
+        const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+    return statusTFromBinderStatus(mDelegate->setVibratorInfos(vibratorInfos));
+}
+
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // AudioFlingerServerAdapter
@@ -1174,4 +1179,9 @@
     return Status::ok();
 }
 
+Status AudioFlingerServerAdapter::setVibratorInfos(
+        const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+    return Status::fromStatusT(mDelegate->setVibratorInfos(vibratorInfos));
+}
+
 } // namespace android
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
new file mode 100644
index 0000000..f88fc3c
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * {@hide}
+ * A class for vibrator information. The information will be used by the HapticGenerator effect.
+ */
+parcelable AudioVibratorInfo {
+    int id;
+    float resonantFrequency;
+    float qFactor;
+}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index e63f391..abbced5 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -23,6 +23,7 @@
 import android.media.AudioStreamType;
 import android.media.AudioUniqueIdUse;
 import android.media.AudioUuid;
+import android.media.AudioVibratorInfo;
 import android.media.CreateEffectRequest;
 import android.media.CreateEffectResponse;
 import android.media.CreateRecordRequest;
@@ -202,4 +203,8 @@
     MicrophoneInfoData[] getMicrophones();
 
     void setAudioHalPids(in int[] /* pid_t[] */ pids);
+
+    // Set vibrators' information.
+    // The values will be used to initialize HapticGenerator.
+    void setVibratorInfos(in AudioVibratorInfo[] vibratorInfos);
 }
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index c63d29f..4c99dbd 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,9 +19,10 @@
 
 #include <sys/types.h>
 
-#include <android/media/permission/Identity.h>
+#include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerClient.h>
 #include <android/media/BnAudioPolicyServiceClient.h>
+#include <android/media/permission/Identity.h>
 #include <media/AidlConversionUtil.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioPolicy.h>
@@ -553,6 +554,8 @@
 
     static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
 
+    static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+
 private:
 
     class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index efd7fed..7f7ca85 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -35,6 +35,7 @@
 #include <string>
 #include <vector>
 
+#include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerService.h>
 #include <android/media/BpAudioFlingerService.h>
 #include <android/media/permission/Identity.h>
@@ -331,6 +332,11 @@
     virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
 
     virtual status_t setAudioHalPids(const std::vector<pid_t>& pids) = 0;
+
+    // Set vibrators' information.
+    // The values will be used to initialize HapticGenerator.
+    virtual status_t setVibratorInfos(
+            const std::vector<media::AudioVibratorInfo>& vibratorInfos) = 0;
 };
 
 /**
@@ -422,6 +428,7 @@
     size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
     status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
     status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
+    status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
 
 private:
     const sp<media::IAudioFlingerService> mDelegate;
@@ -504,6 +511,7 @@
             GET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_getMasterBalance,
             SET_EFFECT_SUSPENDED = media::BnAudioFlingerService::TRANSACTION_setEffectSuspended,
             SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
+            SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
         };
 
         /**
@@ -605,6 +613,7 @@
     Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
     Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
     Status setAudioHalPids(const std::vector<int32_t>& pids) override;
+    Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
 
 private:
     const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 2a3e2b6..539a149 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -57,8 +57,7 @@
     // Note: This assumes channel mask, format, and sample rate do not change after creation.
     audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
     if (/* mStreamPowerLog.isUserDebugOrEngBuild() && */
-        StreamHalHidl::getAudioProperties(
-                &config.sample_rate, &config.channel_mask, &config.format) == NO_ERROR) {
+        StreamHalHidl::getAudioProperties(&config) == NO_ERROR) {
         mStreamPowerLog.init(config.sample_rate, config.channel_mask, config.format);
     }
 }
@@ -69,14 +68,6 @@
     hardware::IPCThreadState::self()->flushCommands();
 }
 
-// Note: this method will be removed
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
-    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
-    status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
-    *rate = config.sample_rate;
-    return status;
-}
-
 status_t StreamHalHidl::getBufferSize(size_t *size) {
     if (!mStream) return NO_INIT;
     status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
@@ -86,48 +77,28 @@
     return status;
 }
 
-// Note: this method will be removed
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
-    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
-    status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
-    *mask = config.channel_mask;
-    return status;
-}
-
-// Note: this method will be removed
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
-    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
-    status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
-    *format = config.format;
-    return status;
-}
-
-status_t StreamHalHidl::getAudioProperties(
-        uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+status_t StreamHalHidl::getAudioProperties(audio_config_base_t *configBase) {
+    *configBase = AUDIO_CONFIG_BASE_INITIALIZER;
     if (!mStream) return NO_INIT;
 #if MAJOR_VERSION <= 6
     Return<void> ret = mStream->getAudioProperties(
             [&](uint32_t sr, auto m, auto f) {
-                *sampleRate = sr;
-                *mask = static_cast<audio_channel_mask_t>(m);
-                *format = static_cast<audio_format_t>(f);
+                configBase->sample_rate = sr;
+                configBase->channel_mask = static_cast<audio_channel_mask_t>(m);
+                configBase->format = static_cast<audio_format_t>(f);
             });
     return processReturn("getAudioProperties", ret);
 #else
     Result retval;
     status_t conversionStatus = BAD_VALUE;
-    audio_config_base_t halConfig = AUDIO_CONFIG_BASE_INITIALIZER;
     Return<void> ret = mStream->getAudioProperties(
             [&](Result r, const AudioConfigBase& config) {
                 retval = r;
                 if (retval == Result::OK) {
-                    conversionStatus = HidlUtils::audioConfigBaseToHal(config, &halConfig);
+                    conversionStatus = HidlUtils::audioConfigBaseToHal(config, configBase);
                 }
             });
     if (status_t status = processReturn("getAudioProperties", ret, retval); status == NO_ERROR) {
-        *sampleRate = halConfig.sample_rate;
-        *mask = halConfig.channel_mask;
-        *format = halConfig.format;
         return conversionStatus;
     } else {
         return status;
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index c6db6d6..970903b 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -49,21 +49,14 @@
 class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
 {
   public:
-    // Return the sampling rate in Hz - eg. 44100.
-    virtual status_t getSampleRate(uint32_t *rate);
-
     // Return size of input/output buffer in bytes for this stream - eg. 4800.
     virtual status_t getBufferSize(size_t *size);
 
-    // Return the channel mask.
-    virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
-    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
-    virtual status_t getFormat(audio_format_t *format);
-
-    // Convenience method.
-    virtual status_t getAudioProperties(
-            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+    // Return the base configuration of the stream:
+    //   - channel mask;
+    //   - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+    //   - sampling rate in Hz - e.g. 44100.
+    virtual status_t getAudioProperties(audio_config_base_t *configBase);
 
     // Set audio stream parameters.
     virtual status_t setParameters(const String8& kvPairs);
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index e89b288..d0c375e 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -45,31 +45,15 @@
     mDevice.clear();
 }
 
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
-    *rate = mStream->get_sample_rate(mStream);
-    return OK;
-}
-
 status_t StreamHalLocal::getBufferSize(size_t *size) {
     *size = mStream->get_buffer_size(mStream);
     return OK;
 }
 
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
-    *mask = mStream->get_channels(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
-    *format = mStream->get_format(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
-        uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
-    *sampleRate = mStream->get_sample_rate(mStream);
-    *mask = mStream->get_channels(mStream);
-    *format = mStream->get_format(mStream);
+status_t StreamHalLocal::getAudioProperties(audio_config_base_t *configBase) {
+    configBase->sample_rate = mStream->get_sample_rate(mStream);
+    configBase->channel_mask = mStream->get_channels(mStream);
+    configBase->format = mStream->get_format(mStream);
     return OK;
 }
 
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index e228104..b260495 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -28,21 +28,14 @@
 class StreamHalLocal : public virtual StreamHalInterface
 {
   public:
-    // Return the sampling rate in Hz - eg. 44100.
-    virtual status_t getSampleRate(uint32_t *rate);
-
     // Return size of input/output buffer in bytes for this stream - eg. 4800.
     virtual status_t getBufferSize(size_t *size);
 
-    // Return the channel mask.
-    virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
-    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
-    virtual status_t getFormat(audio_format_t *format);
-
-    // Convenience method.
-    virtual status_t getAudioProperties(
-            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+    // Return the base configuration of the stream:
+    //   - channel mask;
+    //   - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+    //   - sampling rate in Hz - e.g. 44100.
+    virtual status_t getAudioProperties(audio_config_base_t *configBase);
 
     // Set audio stream parameters.
     virtual status_t setParameters(const String8& kvPairs);
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index b47f536..2be12fb 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -31,25 +31,27 @@
 class StreamHalInterface : public virtual RefBase
 {
   public:
-    // TODO(mnaganov): Remove
-    // Return the sampling rate in Hz - eg. 44100.
-    virtual status_t getSampleRate(uint32_t *rate) = 0;
-
     // Return size of input/output buffer in bytes for this stream - eg. 4800.
     virtual status_t getBufferSize(size_t *size) = 0;
 
-    // TODO(mnaganov): Remove
-    // Return the channel mask.
-    virtual status_t getChannelMask(audio_channel_mask_t *mask) = 0;
+    // Return the base configuration of the stream:
+    //   - channel mask;
+    //   - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+    //   - sampling rate in Hz - e.g. 44100.
+    virtual status_t getAudioProperties(audio_config_base_t *configBase) = 0;
 
-    // TODO(mnaganov): Remove
-    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
-    virtual status_t getFormat(audio_format_t *format) = 0;
-
-    // TODO(mnaganov): Change to use audio_config_base_t
     // Convenience method.
-    virtual status_t getAudioProperties(
-            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) = 0;
+    inline status_t getAudioProperties(
+            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+        audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+        const status_t result = getAudioProperties(&config);
+        if (result == NO_ERROR) {
+            if (sampleRate != nullptr) *sampleRate = config.sample_rate;
+            if (mask != nullptr) *mask = config.channel_mask;
+            if (format != nullptr) *format = config.format;
+        }
+        return result;
+    }
 
     // Set audio stream parameters.
     virtual status_t setParameters(const String8& kvPairs) = 0;
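
With the pure virtual now taking audio_config_base_t and the old three-pointer form demoted to an inline convenience wrapper, callers fetch all three properties in one call. A minimal usage sketch, assuming 'stream' is an sp<StreamHalInterface>:

    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
    if (stream->getAudioProperties(&config) == NO_ERROR) {
        ALOGV("sample_rate=%u channel_mask=%#x format=%#x",
              config.sample_rate, config.channel_mask, config.format);
    }
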
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index f2245b1..65a20a7 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -26,11 +26,16 @@
 
 #include <errno.h>
 #include <inttypes.h>
+#include <math.h>
 
 #include <audio_effects/effect_hapticgenerator.h>
 #include <audio_utils/format.h>
 #include <system/audio.h>
 
+static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
+static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
+static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
 audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
@@ -101,11 +106,11 @@
     context->param.audioChannelCount = 0;
     context->param.maxHapticIntensity = os::HapticScale::MUTE;
 
-    context->param.resonantFrequency = 150.0f;
+    context->param.resonantFrequency = DEFAULT_RESONANT_FREQUENCY;
     context->param.bpfQ = 1.0f;
     context->param.slowEnvNormalizationPower = -0.8f;
-    context->param.bsfZeroQ = 8.0f;
-    context->param.bsfPoleQ = 4.0f;
+    context->param.bsfZeroQ = DEFAULT_BSF_ZERO_Q;
+    context->param.bsfPoleQ = DEFAULT_BSF_POLE_Q;
     context->param.distortionCornerFrequency = 300.0f;
     context->param.distortionInputGain = 0.3f;
     context->param.distortionCubeThreshold = 0.1f;
@@ -173,6 +178,7 @@
     addBiquadFilter(processingChain, processorsRecord, lpf);
 
     auto bpf = createBPF(param->resonantFrequency, param->bpfQ, sampleRate, channelCount);
+    processorsRecord.bpf = bpf;
     addBiquadFilter(processingChain, processorsRecord, bpf);
 
     float normalizationPower = param->slowEnvNormalizationPower;
@@ -191,6 +197,7 @@
 
     auto bsf = createBSF(
             param->resonantFrequency, param->bsfZeroQ, param->bsfPoleQ, sampleRate, channelCount);
+    processorsRecord.bsf = bsf;
     addBiquadFilter(processingChain, processorsRecord, bsf);
 
     // The process chain captures the shared pointer of the Distortion in lambda. It will
@@ -279,7 +286,32 @@
         }
         break;
     }
+    case HG_PARAM_VIBRATOR_INFO: {
+        if (value == nullptr || size != 2 * sizeof(float)) {
+            return -EINVAL;
+        }
+        const float resonantFrequency = *(float*) value;
+        const float qFactor = *((float *) value + 1);
+        context->param.resonantFrequency =
+                isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
+        context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_ZERO_Q : qFactor;
+        context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
 
+        if (context->processorsRecord.bpf != nullptr) {
+            context->processorsRecord.bpf->setCoefficients(
+                    bpfCoefs(context->param.resonantFrequency,
+                             context->param.bpfQ,
+                             context->config.inputCfg.samplingRate));
+        }
+        if (context->processorsRecord.bsf != nullptr) {
+            context->processorsRecord.bsf->setCoefficients(
+                    bsfCoefs(context->param.resonantFrequency,
+                             context->param.bsfZeroQ,
+                             context->param.bsfPoleQ,
+                             context->config.inputCfg.samplingRate));
+        }
+        HapticGenerator_Reset(context);
+    } break;
     default:
         ALOGW("Unknown param: %d", param);
         return -EINVAL;
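
HG_PARAM_VIBRATOR_INFO expects its value to be exactly two floats, resonant frequency (Hz) followed by Q factor, per the size check above. A hedged sketch of the payload a caller would build (the effect_param_t / EFFECT_CMD_SET_PARAM plumbing that carries it is the usual libeffects convention and is not shown in this patch):

    const float vibratorInfo[2] = {170.0f /* resonant frequency, Hz */,
                                   10.0f  /* Q factor */};
    // Wrapped in an effect_param_t with param = HG_PARAM_VIBRATOR_INFO and
    // vsize = sizeof(vibratorInfo), then sent via EFFECT_CMD_SET_PARAM.
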
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index d2d7afe..96b744a 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -69,6 +69,11 @@
     std::vector<std::shared_ptr<Ramp>> ramps;
     std::vector<std::shared_ptr<SlowEnvelope>> slowEnvs;
     std::vector<std::shared_ptr<Distortion>> distortions;
+
+    // Cache band-pass filter and band-stop filter for updating parameters
+    // according to vibrator info
+    std::shared_ptr<HapticBiquadFilter> bpf;
+    std::shared_ptr<HapticBiquadFilter> bsf;
 };
 
 // A structure to keep all the context for HapticGenerator.
diff --git a/media/libeffects/hapticgenerator/Processors.cpp b/media/libeffects/hapticgenerator/Processors.cpp
index 79a4e2c..4fe3a75 100644
--- a/media/libeffects/hapticgenerator/Processors.cpp
+++ b/media/libeffects/hapticgenerator/Processors.cpp
@@ -211,9 +211,9 @@
 }
 
 BiquadFilterCoefficients bsfCoefs(const float ringingFrequency,
-                                  const float sampleRate,
                                   const float zq,
-                                  const float pq) {
+                                  const float pq,
+                                  const float sampleRate) {
     BiquadFilterCoefficients coefficient;
     const auto [zeroReal, zeroImg] = getComplexPoleZ(ringingFrequency, zq, sampleRate);
     float zeroCoeff1 = -2 * zeroReal;
@@ -275,7 +275,7 @@
                                         const float pq,
                                         const float sampleRate,
                                         const size_t channelCount) {
-    BiquadFilterCoefficients coefficient = bsfCoefs(ringingFrequency, sampleRate, zq, pq);
+    BiquadFilterCoefficients coefficient = bsfCoefs(ringingFrequency, zq, pq, sampleRate);
     return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
 }
 
diff --git a/media/libeffects/hapticgenerator/Processors.h b/media/libeffects/hapticgenerator/Processors.h
index 452a985..74ca77d 100644
--- a/media/libeffects/hapticgenerator/Processors.h
+++ b/media/libeffects/hapticgenerator/Processors.h
@@ -102,9 +102,9 @@
                                   const float sampleRate);
 
 BiquadFilterCoefficients bsfCoefs(const float ringingFrequency,
-                                  const float sampleRate,
                                   const float zq,
-                                  const float pq);
+                                  const float pq,
+                                  const float sampleRate);
 
 std::shared_ptr<HapticBiquadFilter> createLPF(const float cornerFrequency,
                                         const float sampleRate,
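
The reorder puts sampleRate last so bsfCoefs() matches the argument order of createBSF() and of the bsfCoefs() call added in EffectHapticGenerator.cpp above. For example (48000 Hz is an illustrative rate; 150/8/4 are the HapticGenerator defaults):

    BiquadFilterCoefficients coefs =
            bsfCoefs(/*ringingFrequency=*/150.0f, /*zq=*/8.0f, /*pq=*/4.0f,
                     /*sampleRate=*/48000.0f);
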
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
index 20058a1..4eea04f 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
@@ -23,6 +23,7 @@
 #include <system/audio.h>
 
 #include "LVM_Private.h"
+#include "ScalarArithmetic.h"
 #include "VectorArithmetic.h"
 #include "LVM_Coeffs.h"
 
@@ -178,6 +179,9 @@
                  * Apply the filter
                  */
                 pInstance->pTEBiquad->process(pProcessed, pProcessed, NrFrames);
+                for (auto i = 0; i < NrChannels * NrFrames; i++) {
+                    pProcessed[i] = LVM_Clamp(pProcessed[i]);
+                }
             }
             /*
              * Volume balance
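
The clamp guards against the treble-boost biquad pushing samples outside the nominal full-scale range. Conceptually, LVM_Clamp (pulled in via ScalarArithmetic.h) saturates each float sample, roughly equivalent to the sketch below (assuming a [-1.0, 1.0] full-scale range):

    static inline float clampSample(float x) {
        if (x > 1.0f) return 1.0f;
        if (x < -1.0f) return -1.0f;
        return x;
    }
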
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index df7ca5a..7571a24 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -53,16 +53,16 @@
 flags_arr=(
     "-csE"
     "-eqE"
-    "-tE"
-    "-csE -tE -eqE"
+    "-tE -trebleLvl:15"
+    "-csE -tE -trebleLvl:15 -eqE"
     "-bE -M"
-    "-csE -tE"
-    "-csE -eqE" "-tE -eqE"
-    "-csE -tE -bE -M -eqE"
-    "-tE -eqE -vcBal:96 -M"
-    "-tE -eqE -vcBal:-96 -M"
-    "-tE -eqE -vcBal:0 -M"
-    "-tE -eqE -bE -vcBal:30 -M"
+    "-csE -tE -trebleLvl:15"
+    "-csE -eqE" "-tE -trebleLvl:15 -eqE"
+    "-csE -tE -trebleLvl:15 -bE -M -eqE"
+    "-tE -trebleLvl:15 -eqE -vcBal:96 -M"
+    "-tE -trebleLvl:15 -eqE -vcBal:-96 -M"
+    "-tE -trebleLvl:15 -eqE -vcBal:0 -M"
+    "-tE -trebleLvl:15 -eqE -bE -vcBal:30 -M"
 )
 
 fs_arr=(
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index e484a1a..e65228c 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -79,6 +79,7 @@
     int bassEffectLevel = 0;
     int eqPresetLevel = 0;
     int frameLength = 256;
+    int trebleEffectLevel = 0;
     LVM_BE_Mode_en bassEnable = LVM_BE_OFF;
     LVM_TE_Mode_en trebleEnable = LVM_TE_OFF;
     LVM_EQNB_Mode_en eqEnable = LVM_EQNB_OFF;
@@ -303,10 +304,6 @@
     params->PSA_Enable = LVM_PSA_OFF;
     params->PSA_PeakDecayRate = LVM_PSA_SPEED_MEDIUM;
 
-    /* TE Control parameters */
-    params->TE_OperatingMode = LVM_TE_OFF;
-    params->TE_EffectLevel = 0;
-
     /* Activate the initial settings */
     LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
 
@@ -445,6 +442,7 @@
 
     /* Treble Enhancement parameters */
     params->TE_OperatingMode = plvmConfigParams->trebleEnable;
+    params->TE_EffectLevel = plvmConfigParams->trebleEffectLevel;
 
     /* PSA Control parameters */
     params->PSA_Enable = LVM_PSA_ON;
@@ -604,6 +602,15 @@
                 return -1;
             }
             lvmConfigParams.eqPresetLevel = eqPresetLevel;
+        } else if (!strncmp(argv[i], "-trebleLvl:", 11)) {
+            const int trebleEffectLevel = atoi(argv[i] + 11);
+            if (trebleEffectLevel > LVM_TE_MAX_EFFECTLEVEL ||
+                trebleEffectLevel < LVM_TE_MIN_EFFECTLEVEL) {
+                printf("Error: Unsupported Treble Effect Level : %d\n", trebleEffectLevel);
+                printUsage();
+                return -1;
+            }
+            lvmConfigParams.trebleEffectLevel = trebleEffectLevel;
         } else if (!strcmp(argv[i], "-bE")) {
             lvmConfigParams.bassEnable = LVM_BE_ON;
         } else if (!strcmp(argv[i], "-eqE")) {
diff --git a/media/libmediaformatshaper/CodecProperties.cpp b/media/libmediaformatshaper/CodecProperties.cpp
index 961f676..e6b3c46 100644
--- a/media/libmediaformatshaper/CodecProperties.cpp
+++ b/media/libmediaformatshaper/CodecProperties.cpp
@@ -23,6 +23,10 @@
 
 #include <media/formatshaper/CodecProperties.h>
 
+
+// we aren't going to mess with shaping-point dimensions beyond this limit
+static const int32_t DIMENSION_LIMIT = 16384;
+
 namespace android {
 namespace mediaformatshaper {
 
@@ -113,7 +117,13 @@
             setBpp(bpp);
             legal = true;
         }
+    } else if (!strncmp(key.c_str(), "vq-target-bpp-", strlen("vq-target-bpp-"))) {
+        std::string resolution = key.substr(strlen("vq-target-bpp-"));
+        if (bppPoint(resolution, value)) {
+            legal = true;
+        }
     } else if (!strcmp(key.c_str(), "vq-target-bppx100")) {
+        // legacy, prototyping
         const char *p = value.c_str();
         char *q;
         int32_t iValue =  strtol(p, &q, 0);
@@ -143,6 +153,119 @@
     return false;
 }
 
+bool CodecProperties::bppPoint(std::string resolution, std::string value) {
+
+    int32_t width = 0;
+    int32_t height = 0;
+    double bpp = -1;
+
+    // resolution is "WxH", "W*H" or a standard name like "720p"
+    if (resolution == "1080p") {
+        width = 1080; height = 1920;
+    } else if (resolution == "720p") {
+        width = 720; height = 1280;
+    } else if (resolution == "540p") {
+        width = 540; height = 960;
+    } else if (resolution == "480p") {
+        width = 480; height = 854;
+    } else {
+        size_t sep = resolution.find('x');
+        if (sep == std::string::npos) {
+            sep = resolution.find('*');
+        }
+        if (sep == std::string::npos) {
+            ALOGW("unable to parse resolution: '%s'", resolution.c_str());
+            return false;
+        }
+        std::string w = resolution.substr(0, sep);
+        std::string h = resolution.substr(sep+1);
+
+        char *q;
+        const char *p = w.c_str();
+        width = strtol(p, &q, 0);
+        if (q == p) {
+            width = -1;
+        }
+        p = h.c_str();
+        height = strtol(p, &q, 0);
+        if (q == p) {
+            height = -1;
+        }
+        if (width <= 0 || height <= 0 || width > DIMENSION_LIMIT || height > DIMENSION_LIMIT) {
+            ALOGW("unparseable: width, height '%s'", resolution.c_str());
+            return false;
+        }
+    }
+
+    const char *p = value.c_str();
+    char *q;
+    bpp = strtod(p, &q);
+    if (q == p) {
+        ALOGW("unparseable bpp '%s'", value.c_str());
+        return false;
+    }
+
+    struct bpp_point *point = (struct bpp_point*) malloc(sizeof(*point));
+    if (point == nullptr) {
+        ALOGW("unable to allocate memory for bpp point");
+        return false;
+    }
+
+    point->pixels = width * height;
+    point->width = width;
+    point->height = height;
+    point->bpp = bpp;
+
+    if (mBppPoints == nullptr) {
+        point->next = nullptr;
+        mBppPoints = point;
+    } else if (point->pixels < mBppPoints->pixels) {
+        // at the front
+        point->next = mBppPoints;
+        mBppPoints = point;
+    } else {
+        struct bpp_point *after = mBppPoints;
+        while (after->next) {
+            if (point->pixels > after->next->pixels) {
+                after = after->next;
+                continue;
+            }
+
+            // insert before after->next
+            point->next = after->next;
+            after->next = point;
+            break;
+        }
+        if (after->next == nullptr) {
+            // hasn't gone in yet
+            point->next = nullptr;
+            after->next = point;
+        }
+    }
+
+    return true;
+}
+
+double CodecProperties::getBpp(int32_t width, int32_t height) {
+    // look in the per-resolution list
+
+    int32_t pixels = width * height;
+
+    if (mBppPoints) {
+        struct bpp_point *point = mBppPoints;
+        while (point && point->pixels < pixels) {
+            point = point->next;
+        }
+        if (point) {
+            ALOGV("getBpp(w=%d,h=%d) returns %f from bpppoint w=%d h=%d",
+                width, height, point->bpp, point->width, point->height);
+            return point->bpp;
+        }
+    }
+
+    ALOGV("defaulting to %f bpp", mBpp);
+    return mBpp;
+}
 
 std::string CodecProperties::getMapping(std::string key, std::string kind) {
     ALOGV("getMapping(key %s, kind %s )", key.c_str(), kind.c_str());
diff --git a/media/libmediaformatshaper/CodecSeeding.cpp b/media/libmediaformatshaper/CodecSeeding.cpp
index fde7833..a7fcc66 100644
--- a/media/libmediaformatshaper/CodecSeeding.cpp
+++ b/media/libmediaformatshaper/CodecSeeding.cpp
@@ -50,13 +50,16 @@
 
 static preloadTuning_t featuresAvc[] = {
       {true, "vq-target-bpp", "2.45"},
-      {true, "vq-target-qpmax", "41"},
+      {true, "vq-target-bpp-1080p", "2.40"},
+      {true, "vq-target-bpp-540p", "2.60"},
+      {true, "vq-target-bpp-480p", "3.00"},
+      {true, "vq-target-qpmax", "40"},
       {true, nullptr, 0}
 };
 
 static preloadTuning_t featuresHevc[] = {
       {true, "vq-target-bpp", "2.30"},
-      {true, "vq-target-qpmax", "42"}, // nop, since hevc codecs don't declare qp support
+      {true, "vq-target-qpmax", "40"}, // nop, since hevc codecs don't declare qp support
       {true, nullptr, 0}
 };
 
diff --git a/media/libmediaformatshaper/VQApply.cpp b/media/libmediaformatshaper/VQApply.cpp
index 39a5e19..08e23cc 100644
--- a/media/libmediaformatshaper/VQApply.cpp
+++ b/media/libmediaformatshaper/VQApply.cpp
@@ -48,6 +48,15 @@
 //
 static const int BITRATE_MODE_VBR = 1;
 
+
+// constants we use within the calculations
+//
+constexpr double BITRATE_LEAVE_UNTOUCHED = 2.0;
+constexpr double BITRATE_QP_UNAVAILABLE = 1.20;
+// 10% didn't work so hot on bonito (with no QP support)
+// 15% is next.. still leaves a few short
+// 20%? this is on the edge of what I want to do
+
 //
 // Caller retains ownership of and responsibility for inFormat
 //
@@ -69,69 +78,82 @@
     }
 
     //
-    // apply any and all tools that we have.
+    // consider any and all tools available
     // -- qp
     // -- minimum bits-per-pixel
     //
-    if (!codec->supportsQp()) {
-        ALOGD("minquality: no qp bounding in codec %s", codec->getName().c_str());
-    } else {
-        // use a (configurable) QP value to force better quality
-        //
+    int64_t bitrateChosen = 0;
+    int32_t qpChosen = INT32_MAX;
+
+    int64_t bitrateConfigured = 0;
+    int32_t bitrateConfiguredTmp = 0;
+    (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrateConfiguredTmp);
+    bitrateConfigured = bitrateConfiguredTmp;
+    bitrateChosen = bitrateConfigured;
+
+    int32_t width = 0;
+    (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_WIDTH, &width);
+    int32_t height = 0;
+    (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_HEIGHT, &height);
+    int64_t pixels = ((int64_t)width) * height;
+    double minimumBpp = codec->getBpp(width, height);
+
+    int64_t bitrateFloor = pixels * minimumBpp;
+    if (bitrateFloor > INT32_MAX) bitrateFloor = INT32_MAX;
+
+    // if we are far enough above the target bpp, leave it alone
+    //
+    ALOGV("bitrate: configured %" PRId64 " floor %" PRId64, bitrateConfigured, bitrateFloor);
+    if (bitrateConfigured >= BITRATE_LEAVE_UNTOUCHED * bitrateFloor) {
+        ALOGV("high enough bitrate: configured %" PRId64 " >= %f * floor %" PRId64,
+                bitrateConfigured, BITRATE_LEAVE_UNTOUCHED, bitrateFloor);
+        return 0;
+    }
+
+    // raise anything below the bitrate floor
+    if (bitrateConfigured < bitrateFloor) {
+        ALOGD("raise bitrate: configured %" PRId64 " to floor %" PRId64,
+                bitrateConfigured, bitrateFloor);
+        bitrateChosen = bitrateFloor;
+    }
+
+    bool qpPresent = hasQp(inFormat);
+
+    // add QP, if not already present
+    if (!qpPresent) {
         int32_t qpmax = codec->targetQpMax();
-        int32_t qpmaxUser = INT32_MAX;
-        if (hasQp(inFormat)) {
-            (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, &qpmaxUser);
-            ALOGD("minquality by QP: format already sets QP");
-        }
-
-        // if the system didn't do one, use what the user provided
-        if (qpmax == 0 && qpmaxUser != INT32_MAX) {
-                qpmax = qpmaxUser;
-        }
-        // XXX: if both said something, how do we want to reconcile that
-
-        if (qpmax > 0) {
-            ALOGD("minquality by QP: inject %s=%d", AMEDIAFORMAT_VIDEO_QP_MAX, qpmax);
-            AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, qpmax);
-
-            // force spreading the QP across frame types, since we imposing a value
-            qpSpreadMaxPerFrameType(inFormat, info->qpDelta, info->qpMax, /* override */ true);
+        if (qpmax != INT32_MAX) {
+            ALOGV("choosing qp=%d", qpmax);
+            qpChosen = qpmax;
         }
     }
 
-    double bpp = codec->getBpp();
-    if (bpp > 0.0) {
-        // if we've decided to use bits-per-pixel (per second) to drive the quality
-        //
-        // (properly phrased as 'bits per second per pixel' so that it's resolution
-        // and framerate agnostic
-        //
-        // all of these is structured so that a missing value cleanly gets us to a
-        // non-faulting value of '0' for the minimum bits-per-pixel.
-        //
-        int32_t width = 0;
-        (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_WIDTH, &width);
-        int32_t height = 0;
-        (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_HEIGHT, &height);
-        int32_t bitrateConfigured = 0;
-        (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrateConfigured);
-
-        int64_t pixels = ((int64_t)width) * height;
-        int64_t bitrateFloor = pixels * bpp;
-
-        if (bitrateFloor > INT32_MAX) bitrateFloor = INT32_MAX;
-
-        ALOGD("minquality/bitrate: target %d floor %" PRId64 "(%.3f bpp * (%d w * %d h)",
-              bitrateConfigured, bitrateFloor, codec->getBpp(), height, width);
-
-        if (bitrateConfigured < bitrateFloor) {
-            ALOGD("minquality/target bitrate raised from %d to %" PRId64 " bps",
-                  bitrateConfigured, bitrateFloor);
-            AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, (int32_t)bitrateFloor);
+    // if QP is desired but not supported, compensate with additional bits
+    if (!codec->supportsQp()) {
+        if (qpPresent || qpChosen != INT32_MAX) {
+            ALOGD("minquality: desired QP, but unsupported, boost bitrate %" PRId64 " to %" PRId64,
+                bitrateChosen, (int64_t)(bitrateChosen * BITRATE_QP_UNAVAILABLE));
+            bitrateChosen = bitrateChosen * BITRATE_QP_UNAVAILABLE;
+            qpChosen = INT32_MAX;
         }
     }
 
+    // apply our chosen values
+    //
+    if (qpChosen != INT32_MAX) {
+        ALOGD("minquality by QP: inject %s=%d", AMEDIAFORMAT_VIDEO_QP_MAX, qpChosen);
+        AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, qpChosen);
+
+        // force spreading the QP across frame types, since we are imposing a value
+        qpSpreadMaxPerFrameType(inFormat, info->qpDelta, info->qpMax, /* override */ true);
+    }
+
+    if (bitrateChosen != bitrateConfigured) {
+        ALOGD("minquality/target bitrate raised from %" PRId64 " to %" PRId64 " bps",
+              bitrateConfigured, bitrateChosen);
+        AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, (int32_t)bitrateChosen);
+    }
+
     return 0;
 }
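
A worked example of the new flow, using the seeded 1080p AVC point (2.40 bpp) for a 1080x1920 session (2,073,600 pixels, so bitrateFloor is about 4.98 Mbps):

    // bitrateFloor  = 2,073,600 * 2.40             ~= 4,976,640 bps
    // configured 12 Mbps >= 2.0 * floor (~9.95 M)  -> left untouched, return early
    // configured  3 Mbps <  floor                  -> raised to ~4.98 Mbps
    // QP wanted but codec lacks QP support         -> bitrate boosted a further 20%
    //                                                 (BITRATE_QP_UNAVAILABLE = 1.20)
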
 
diff --git a/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h b/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
index 84268b9..ff7051f 100644
--- a/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
+++ b/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
@@ -21,6 +21,8 @@
 #include <mutex>
 #include <string>
 
+#include <inttypes.h>
+
 #include <utils/RefBase.h>
 
 namespace android {
@@ -73,7 +75,7 @@
     // This is used to calculate a minimum bitrate for any particular resolution.
     // A 1080p (1920*1080 = 2073600 pixels) to be encoded at 5Mbps has a bpp == 2.41
     void setBpp(double bpp) { mBpp = bpp;}
-    double getBpp() {return mBpp;}
+    double getBpp(int32_t width, int32_t height);
 
     // Does this codec support QP bounding
     // The getMapping() methods provide any needed mapping to non-standard keys.
@@ -92,10 +94,22 @@
     std::string mMediaType;
     int mApi = 0;
     int mMinimumQuality = 0;
-    int mTargetQpMax = 0;
+    int mTargetQpMax = INT32_MAX;
     bool mSupportsQp = false;
     double mBpp = 0.0;
 
+    // allow different target bits-per-pixel based on resolution
+    // similar to codec 'performance points'
+    // uses 'next largest' (by pixel count) point as minimum bpp
+    struct bpp_point {
+        struct bpp_point *next;
+        int32_t pixels;
+        int32_t width, height;
+        double bpp;
+    };
+    struct bpp_point *mBppPoints = nullptr;
+    bool bppPoint(std::string resolution, std::string value);
+
     std::mutex mMappingLock;
     // XXX figure out why I'm having problems getting compiler to like GUARDED_BY
     std::map<std::string, std::string> mMappings /*GUARDED_BY(mMappingLock)*/ ;
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index d250976..287317d 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -38,6 +38,7 @@
         "media_permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libbase",
+        "libactivitymanager_aidl",
         "libandroid_net",
         "libaudioclient",
         "libbinder",
diff --git a/media/libmediatranscoding/TranscodingClientManager.cpp b/media/libmediatranscoding/TranscodingClientManager.cpp
index 086c658..6dbcaf9 100644
--- a/media/libmediatranscoding/TranscodingClientManager.cpp
+++ b/media/libmediatranscoding/TranscodingClientManager.cpp
@@ -97,8 +97,8 @@
     Status addClientUid(int32_t /*in_sessionId*/, int32_t /*in_clientUid*/,
                         bool* /*_aidl_return*/) override;
 
-    Status getClientUids(int32_t /*in_sessionId*/, std::vector<int32_t>* /*out_clientUids*/,
-                         bool* /*_aidl_return*/) override;
+    Status getClientUids(int32_t /*in_sessionId*/,
+                         std::optional<std::vector<int32_t>>* /*_aidl_return*/) override;
 
     Status unregister() override;
 };
@@ -259,10 +259,9 @@
     return Status::ok();
 }
 
-Status TranscodingClientManager::ClientImpl::getClientUids(int32_t in_sessionId,
-                                                           std::vector<int32_t>* out_clientUids,
-                                                           bool* _aidl_return) {
-    *_aidl_return = false;
+Status TranscodingClientManager::ClientImpl::getClientUids(
+        int32_t in_sessionId, std::optional<std::vector<int32_t>>* _aidl_return) {
+    *_aidl_return = std::nullopt;
 
     std::shared_ptr<TranscodingClientManager> owner;
     if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
@@ -273,8 +272,11 @@
         return Status::ok();
     }
 
-    *_aidl_return =
-            owner->mSessionController->getClientUids(mClientId, in_sessionId, out_clientUids);
+    std::vector<int32_t> result;
+
+    if (owner->mSessionController->getClientUids(mClientId, in_sessionId, &result)) {
+        *_aidl_return = result;
+    }
     return Status::ok();
 }
 
diff --git a/media/libmediatranscoding/TranscodingSessionController.cpp b/media/libmediatranscoding/TranscodingSessionController.cpp
index 2518e70..68e2875 100644
--- a/media/libmediatranscoding/TranscodingSessionController.cpp
+++ b/media/libmediatranscoding/TranscodingSessionController.cpp
@@ -195,6 +195,7 @@
 
     bool onSessionStarted(uid_t uid);
     void onSessionCompleted(uid_t uid, std::chrono::microseconds runningTime);
+    void onSessionCancelled(uid_t uid);
 
 private:
     // Threshold of time between finish/start below which a back-to-back start is counted.
@@ -267,6 +268,18 @@
     mUidHistoryMap[uid].lastCompletedTime = std::chrono::steady_clock::now();
 }
 
+void TranscodingSessionController::Pacer::onSessionCancelled(uid_t uid) {
+    if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
+        ALOGV("Pacer::onSessionCancelled: uid %d: not present", uid);
+        return;
+    }
+    // This is only called when a uid is removed from a session (because it was killed,
+    // or because the original submitting client went away while the session was kept
+    // for offline use). Since the uid will miss the onSessionCompleted(), we can't
+    // track this session, and have to check back at the next onSessionStarted().
+    mUidHistoryMap[uid].sessionActive = false;
+}
+
 ///////////////////////////////////////////////////////////////////////////////
 
 TranscodingSessionController::TranscodingSessionController(
@@ -539,8 +552,9 @@
     mSessionQueues[clientUid].push_back(sessionKey);
 }
 
-void TranscodingSessionController::removeSession_l(const SessionKeyType& sessionKey,
-                                                   Session::State finalState, bool keepForOffline) {
+void TranscodingSessionController::removeSession_l(
+        const SessionKeyType& sessionKey, Session::State finalState,
+        const std::shared_ptr<std::function<bool(uid_t uid)>>& keepUid) {
     ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
 
     if (mSessionMap.count(sessionKey) == 0) {
@@ -550,9 +564,17 @@
 
     // Remove session from uid's queue.
     bool uidQueueRemoved = false;
+    std::unordered_set<uid_t> remainingUids;
     for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
-        if (keepForOffline && uid == OFFLINE_UID) {
-            continue;
+        if (keepUid != nullptr) {
+            if ((*keepUid)(uid)) {
+                remainingUids.insert(uid);
+                continue;
+            }
+            // If we have uids to keep, the session is not moving to any final
+            // state, so we can't use onSessionCompleted() as the running time
+            // will not be valid. Only notify the pacer to stop tracking this session.
+            mPacer->onSessionCancelled(uid);
         }
         SessionQueueType& sessionQueue = mSessionQueues[uid];
         auto it = std::find(sessionQueue.begin(), sessionQueue.end(), sessionKey);
@@ -578,8 +600,8 @@
         moveUidsToTop_l(topUids, false /*preserveTopUid*/);
     }
 
-    if (keepForOffline) {
-        mSessionMap[sessionKey].allClientUids = {OFFLINE_UID};
+    if (keepUid != nullptr) {
+        mSessionMap[sessionKey].allClientUids = remainingUids;
         return;
     }
 
@@ -590,10 +612,10 @@
 
     setSessionState_l(&mSessionMap[sessionKey], finalState);
 
-    if (finalState == Session::FINISHED || finalState == Session::ERROR) {
-        for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
-            mPacer->onSessionCompleted(uid, mSessionMap[sessionKey].runningTime);
-        }
+    // We can use onSessionCompleted() even for CANCELED, because runningTime is
+    // now updated by setSessionState_l().
+    for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+        mPacer->onSessionCompleted(uid, mSessionMap[sessionKey].runningTime);
     }
 
     mSessionHistory.push_back(mSessionMap[sessionKey]);
@@ -743,8 +765,10 @@
         removeSession_l(*it, Session::CANCELED);
     }
 
+    auto keepUid = std::make_shared<std::function<bool(uid_t)>>(
+            [](uid_t uid) { return uid == OFFLINE_UID; });
     for (auto it = sessionsForOffline.begin(); it != sessionsForOffline.end(); ++it) {
-        removeSession_l(*it, Session::CANCELED, true /*keepForOffline*/);
+        removeSession_l(*it, Session::CANCELED, keepUid);
     }
 
     // Start next session.
@@ -990,6 +1014,58 @@
     validateState_l();
 }
 
+void TranscodingSessionController::onUidGone(uid_t goneUid) {
+    ALOGD("%s: gone uid %u", __FUNCTION__, goneUid);
+
+    std::list<SessionKeyType> sessionsToRemove, sessionsForOtherUids;
+
+    std::scoped_lock lock{mLock};
+
+    for (auto it = mSessionMap.begin(); it != mSessionMap.end(); ++it) {
+        if (it->second.allClientUids.count(goneUid) > 0) {
+            // If goneUid is the only uid, remove the session; otherwise, only
+            // remove the uid from the session.
+            if (it->second.allClientUids.size() > 1) {
+                sessionsForOtherUids.push_back(it->first);
+            } else {
+                sessionsToRemove.push_back(it->first);
+            }
+        }
+    }
+
+    for (auto it = sessionsToRemove.begin(); it != sessionsToRemove.end(); ++it) {
+        // If the session has ever been started, stop it now.
+        // Note that stop() is needed even if the session is currently paused. This instructs
+        // the transcoder to discard any states for the session, otherwise the states may
+        // never be discarded.
+        if (mSessionMap[*it].getState() != Session::NOT_STARTED) {
+            mTranscoder->stop(it->first, it->second);
+        }
+
+        {
+            auto clientCallback = mSessionMap[*it].callback.lock();
+            if (clientCallback != nullptr) {
+                clientCallback->onTranscodingFailed(it->second,
+                                                    TranscodingErrorCode::kUidGoneCancelled);
+            }
+        }
+
+        // Remove the session.
+        removeSession_l(*it, Session::CANCELED);
+    }
+
+    auto keepUid = std::make_shared<std::function<bool(uid_t)>>(
+            [goneUid](uid_t uid) { return uid != goneUid; });
+    for (auto it = sessionsForOtherUids.begin(); it != sessionsForOtherUids.end(); ++it) {
+        removeSession_l(*it, Session::CANCELED, keepUid);
+    }
+
+    // Start next session.
+    updateCurrentSession_l();
+
+    validateState_l();
+}
+
 void TranscodingSessionController::onResourceAvailable() {
     std::scoped_lock lock{mLock};
 
diff --git a/media/libmediatranscoding/TranscodingUidPolicy.cpp b/media/libmediatranscoding/TranscodingUidPolicy.cpp
index b5eb028..0a1ffbc 100644
--- a/media/libmediatranscoding/TranscodingUidPolicy.cpp
+++ b/media/libmediatranscoding/TranscodingUidPolicy.cpp
@@ -141,38 +141,34 @@
 }
 
 void TranscodingUidPolicy::onUidStateChanged(uid_t uid, int32_t procState) {
-    ALOGV("onUidStateChanged: %u, procState %d", uid, procState);
+    ALOGV("onUidStateChanged: uid %u, procState %d", uid, procState);
 
     bool topUidSetChanged = false;
+    bool isUidGone = false;
     std::unordered_set<uid_t> topUids;
     {
         Mutex::Autolock _l(mUidLock);
         auto it = mUidStateMap.find(uid);
         if (it != mUidStateMap.end() && it->second != procState) {
-            // Top set changed if 1) the uid is in the current top uid set, or 2) the
-            // new procState is at least the same priority as the current top uid state.
-            bool isUidCurrentTop =
-                    mTopUidState != IMPORTANCE_UNKNOWN && mStateUidMap[mTopUidState].count(uid) > 0;
-            bool isNewStateHigherThanTop =
-                    procState != IMPORTANCE_UNKNOWN &&
-                    (procState <= mTopUidState || mTopUidState == IMPORTANCE_UNKNOWN);
-            topUidSetChanged = (isUidCurrentTop || isNewStateHigherThanTop);
+            isUidGone = (procState == AACTIVITYMANAGER_IMPORTANCE_GONE);
+
+            topUids = mStateUidMap[mTopUidState];
 
             // Move uid to the new procState.
             mStateUidMap[it->second].erase(uid);
             mStateUidMap[procState].insert(uid);
             it->second = procState;
 
-            if (topUidSetChanged) {
-                updateTopUid_l();
-
+            updateTopUid_l();
+            if (topUids != mStateUidMap[mTopUidState]) {
                 // Make a copy of the uid set for callback.
                 topUids = mStateUidMap[mTopUidState];
+                topUidSetChanged = true;
             }
         }
     }
 
-    ALOGV("topUidSetChanged: %d", topUidSetChanged);
+    ALOGV("topUidSetChanged: %d, isUidGone %d", topUidSetChanged, isUidGone);
 
     if (topUidSetChanged) {
         auto callback = mUidPolicyCallback.lock();
@@ -180,6 +176,12 @@
             callback->onTopUidsChanged(topUids);
         }
     }
+    if (isUidGone) {
+        auto callback = mUidPolicyCallback.lock();
+        if (callback != nullptr) {
+            callback->onUidGone(uid);
+        }
+    }
 }
 
 void TranscodingUidPolicy::updateTopUid_l() {
diff --git a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
index c6fa57f..9ef9052 100644
--- a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
+++ b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
@@ -77,7 +77,8 @@
-     * @clientUids array to hold the retrieved client uid list.
-     * @return false if the session doesn't exist, true otherwise.
+     * @return the list of client uids for the session, or null if the
+     *         session doesn't exist.
      */
-    boolean getClientUids(in int sessionId, out int[] clientUids);
+    @nullable
+    int[] getClientUids(in int sessionId);
 
     /**
     * Unregister the client with the MediaTranscodingService.
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
index 5349fe1..fdd86c7 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
@@ -38,4 +38,5 @@
     kErrorIO               = kPrivateErrorFirst + 5,
     kInsufficientResources = kPrivateErrorFirst + 6,
     kWatchdogTimeout       = kPrivateErrorFirst + 7,
+    kUidGoneCancelled      = kPrivateErrorFirst + 8,
 }
\ No newline at end of file
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
index 05234f4..2691201 100644
--- a/media/libmediatranscoding/include/media/TranscodingSessionController.h
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -73,6 +73,7 @@
 
     // UidPolicyCallbackInterface
     void onTopUidsChanged(const std::unordered_set<uid_t>& uids) override;
+    void onUidGone(uid_t goneUid) override;
     // ~UidPolicyCallbackInterface
 
     // ResourcePolicyCallbackInterface
@@ -189,7 +190,7 @@
     void updateCurrentSession_l();
     void addUidToSession_l(uid_t uid, const SessionKeyType& sessionKey);
     void removeSession_l(const SessionKeyType& sessionKey, Session::State finalState,
-                         bool keepForOffline = false);
+                         const std::shared_ptr<std::function<bool(uid_t uid)>>& keepUid = nullptr);
     void moveUidsToTop_l(const std::unordered_set<uid_t>& uids, bool preserveTopUid);
     void setSessionState_l(Session* session, Session::State state);
     void notifyClient(ClientIdType clientId, SessionIdType sessionId, const char* reason,
diff --git a/media/libmediatranscoding/include/media/UidPolicyInterface.h b/media/libmediatranscoding/include/media/UidPolicyInterface.h
index 05d8db0..445a2ff 100644
--- a/media/libmediatranscoding/include/media/UidPolicyInterface.h
+++ b/media/libmediatranscoding/include/media/UidPolicyInterface.h
@@ -48,6 +48,9 @@
     // has changed. The receiver of this callback should adjust accordingly.
     virtual void onTopUidsChanged(const std::unordered_set<uid_t>& uids) = 0;
 
+    // Called when a uid is gone.
+    virtual void onUidGone(uid_t goneUid) = 0;
+
 protected:
     virtual ~UidPolicyCallbackInterface() = default;
 };
diff --git a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
index b7b1279..9233410 100644
--- a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
@@ -577,7 +577,7 @@
     addMultipleClients();
 
     bool result;
-    std::vector<int32_t> clientUids;
+    std::optional<std::vector<int32_t>> clientUids;
     TranscodingRequestParcel request;
     TranscodingSessionParcel session;
     uid_t ownUid = ::getuid();
@@ -587,10 +587,10 @@
     EXPECT_FALSE(result);
     EXPECT_TRUE(mClient1->addClientUid(SESSION(0), ownUid, &result).isOk());
     EXPECT_FALSE(result);
-    EXPECT_TRUE(mClient1->getClientUids(-1, &clientUids, &result).isOk());
-    EXPECT_FALSE(result);
-    EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids, &result).isOk());
-    EXPECT_FALSE(result);
+    EXPECT_TRUE(mClient1->getClientUids(-1, &clientUids).isOk());
+    EXPECT_EQ(clientUids, std::nullopt);
+    EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids).isOk());
+    EXPECT_EQ(clientUids, std::nullopt);
 
     unregisterMultipleClients();
 }
@@ -599,7 +599,7 @@
     addMultipleClients();
 
     bool result;
-    std::vector<int32_t> clientUids;
+    std::optional<std::vector<int32_t>> clientUids;
     TranscodingRequestParcel request;
     TranscodingSessionParcel session;
     uid_t ownUid = ::getuid();
@@ -612,10 +612,10 @@
     EXPECT_TRUE(result);
 
     // Should have own uid in client uid list.
-    EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids, &result).isOk());
-    EXPECT_TRUE(result);
-    EXPECT_EQ(clientUids.size(), 1);
-    EXPECT_EQ(clientUids[0], ownUid);
+    EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids).isOk());
+    EXPECT_NE(clientUids, std::nullopt);
+    EXPECT_EQ(clientUids->size(), 1);
+    EXPECT_EQ((*clientUids)[0], ownUid);
 
     // Adding invalid client uid should fail.
     EXPECT_TRUE(mClient1->addClientUid(SESSION(0), kInvalidClientUid, &result).isOk());
@@ -633,28 +633,28 @@
     EXPECT_TRUE(result);
 
     // Should not have own uid in client uid list.
-    EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids, &result).isOk());
-    EXPECT_TRUE(result);
-    EXPECT_EQ(clientUids.size(), 0);
+    EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+    EXPECT_NE(clientUids, std::nullopt);
+    EXPECT_EQ(clientUids->size(), 0);
 
     // Add own uid (with IMediaTranscodingService::USE_CALLING_UID) again, should succeed.
     EXPECT_TRUE(
             mClient1->addClientUid(SESSION(1), IMediaTranscodingService::USE_CALLING_UID, &result)
                     .isOk());
     EXPECT_TRUE(result);
-    EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids, &result).isOk());
-    EXPECT_TRUE(result);
-    EXPECT_EQ(clientUids.size(), 1);
-    EXPECT_EQ(clientUids[0], ownUid);
+    EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+    EXPECT_NE(clientUids, std::nullopt);
+    EXPECT_EQ(clientUids->size(), 1);
+    EXPECT_EQ((*clientUids)[0], ownUid);
 
     // Add more uids, should succeed.
     int32_t kFakeUid = ::getuid() ^ 0x1;
     EXPECT_TRUE(mClient1->addClientUid(SESSION(1), kFakeUid, &result).isOk());
     EXPECT_TRUE(result);
-    EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids, &result).isOk());
-    EXPECT_TRUE(result);
+    EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+    EXPECT_NE(clientUids, std::nullopt);
     std::unordered_set<uid_t> uidSet;
-    uidSet.insert(clientUids.begin(), clientUids.end());
+    uidSet.insert(clientUids->begin(), clientUids->end());
     EXPECT_EQ(uidSet.size(), 2);
     EXPECT_EQ(uidSet.count(ownUid), 1);
     EXPECT_EQ(uidSet.count(kFakeUid), 1);
diff --git a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
index 2be9e7d..9e7fa95 100644
--- a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -915,6 +915,52 @@
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
 }
 
+TEST_F(TranscodingSessionControllerTest, TestUidGone) {
+    ALOGD("TestUidGone");
+
+    mUidPolicy->setTop(UID(0));
+    // Start with unspecified top UID.
+    // Submit real-time sessions to CLIENT(0), session should start immediately.
+    mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+    mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+    EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+    // Submit an offline session to CLIENT(1); it should not start.
+    mController->submit(CLIENT(1), SESSION(0), UID(1), UID(1), mOfflineRequest, mClientCallback1);
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+    EXPECT_TRUE(mController->addClientUid(CLIENT(1), SESSION(0), UID(1)));
+
+    // Make UID(1) the top uid. CLIENT(0)'s SESSION(0) should pause and SESSION(1) should start.
+    mUidPolicy->setTop(UID(1));
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+    // Tell the controller that UID(0) is gone; its paused SESSION(0) should be cancelled.
+    mController->onUidGone(UID(0));
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(0)));
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(0)));
+    EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+    std::vector<int32_t> clientUids;
+    EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+    EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+    EXPECT_EQ(clientUids.size(), 1);
+    EXPECT_EQ(clientUids[0], UID(1));
+
+    // Tell the controller that UID(1) is gone too.
+    mController->onUidGone(UID(1));
+    // CLIENT(1)'s SESSION(0) should start, CLIENT(0)'s SESSION(1) should be cancelled.
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(1)));
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(1)));
+    EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+    EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+    // CLIENT(1) SESSION(0) should not have any client uids as it's only kept for offline.
+    EXPECT_TRUE(mController->getClientUids(CLIENT(1), SESSION(0), &clientUids));
+    EXPECT_EQ(clientUids.size(), 0);
+}
+
 TEST_F(TranscodingSessionControllerTest, TestAddGetClientUids) {
     ALOGD("TestAddGetClientUids");
 
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 1054b68..ca98b28 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -46,13 +46,11 @@
         status_t result;
         result = mStream->getBufferSize(&mStreamBufferSizeBytes);
         if (result != OK) return result;
-        audio_format_t streamFormat;
-        uint32_t sampleRate;
-        audio_channel_mask_t channelMask;
-        result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+        audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+        result = mStream->getAudioProperties(&config);
         if (result != OK) return result;
-        mFormat = Format_from_SR_C(sampleRate,
-                audio_channel_count_from_in_mask(channelMask), streamFormat);
+        mFormat = Format_from_SR_C(config.sample_rate,
+                audio_channel_count_from_in_mask(config.channel_mask), config.format);
         mFrameSize = Format_frameSize(mFormat);
     }
     return NBAIO_Source::negotiate(offers, numOffers, counterOffers, numCounterOffers);
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index 8564899..581867f 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -44,13 +44,11 @@
         status_t result;
         result = mStream->getBufferSize(&mStreamBufferSizeBytes);
         if (result != OK) return result;
-        audio_format_t streamFormat;
-        uint32_t sampleRate;
-        audio_channel_mask_t channelMask;
-        result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+        audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+        result = mStream->getAudioProperties(&config);
         if (result != OK) return result;
-        mFormat = Format_from_SR_C(sampleRate,
-                audio_channel_count_from_out_mask(channelMask), streamFormat);
+        mFormat = Format_from_SR_C(config.sample_rate,
+                audio_channel_count_from_out_mask(config.channel_mask), config.format);
         mFrameSize = Format_frameSize(mFormat);
     }
     return NBAIO_Sink::negotiate(offers, numOffers, counterOffers, numCounterOffers);
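Both nbaio adapters above now fetch sample rate, channel mask, and format through a single audio_config_base_t instead of three out-parameters. A reduced sketch of the pattern, assuming a stream handle exposing the getAudioProperties(audio_config_base_t*) overload used in these hunks:

#include <system/audio.h>

status_t queryInputFormat(const sp<StreamInHalInterface>& stream, NBAIO_Format* format) {
    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
    status_t result = stream->getAudioProperties(&config);
    if (result != OK) return result;
    *format = Format_from_SR_C(config.sample_rate,
            audio_channel_count_from_in_mask(config.channel_mask), config.format);
    return OK;
}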
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 52434b3..d6e36b9 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -274,6 +274,7 @@
         "MPEG2TSWriter.cpp",
         "MPEG4Writer.cpp",
         "MediaAdapter.cpp",
+        "MediaAppender.cpp",
         "MediaClock.cpp",
         "MediaCodec.cpp",
         "MediaCodecList.cpp",
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 76a5cab..5c39239 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -519,12 +519,12 @@
     mSendNotify = false;
     mWriteSeekErr = false;
     mFallocateErr = false;
-
     // Reset following variables for all the sessions and they will be
     // initialized in start(MetaData *param).
     mIsRealTimeRecording = true;
     mUse4ByteNalLength = true;
     mOffset = 0;
+    mMaxOffsetAppend = 0;
     mPreAllocateFileEndOffset = 0;
     mMdatOffset = 0;
     mMdatEndOffset = 0;
@@ -992,6 +992,19 @@
         seekOrPostError(mFd, mFreeBoxOffset, SEEK_SET);
         writeInt32(mInMemoryCacheSize);
         write("free", 4);
+        if (mInMemoryCacheSize >= 8) {
+            off64_t bufSize = mInMemoryCacheSize - 8;
+            char* zeroBuffer = new (std::nothrow) char[bufSize];
+            if (zeroBuffer) {
+                std::fill_n(zeroBuffer, bufSize, '0');
+                writeOrPostError(mFd, zeroBuffer, bufSize);
+                delete [] zeroBuffer;
+            } else {
+                ALOGW("freebox in file isn't initialized to 0");
+            }
+        } else {
+            ALOGW("freebox size is less than 8:%" PRId64, mInMemoryCacheSize);
+        }
         mMdatOffset = mFreeBoxOffset + mInMemoryCacheSize;
     } else {
         mMdatOffset = mOffset;
@@ -1541,6 +1554,26 @@
         MediaBuffer *buffer, bool usePrefix,
         uint32_t tiffHdrOffset, size_t *bytesWritten) {
     off64_t old_offset = mOffset;
+    int64_t offset;
+    ALOGV("buffer->range_length:%lld", (long long)buffer->range_length());
+    if (buffer->meta_data().findInt64(kKeySampleFileOffset, &offset)) {
+        ALOGV("offset:%lld, old_offset:%lld", (long long)offset, (long long)old_offset);
+        if (old_offset == offset) {
+            mOffset += buffer->range_length();
+        } else {
+            ALOGV("offset and old_offset are not equal! diff:%lld", (long long)offset - old_offset);
+            mOffset = offset + buffer->range_length();
+            // mOffset += buffer->range_length() + offset - old_offset;
+        }
+        *bytesWritten = buffer->range_length();
+        ALOGV("mOffset:%lld, mMaxOffsetAppend:%lld, bytesWritten:%lld", (long long)mOffset,
+                  (long long)mMaxOffsetAppend, (long long)*bytesWritten);
+        mMaxOffsetAppend = std::max(mOffset, mMaxOffsetAppend);
+        seekOrPostError(mFd, mMaxOffsetAppend, SEEK_SET);
+        return offset;
+    }
+
+    ALOGV("mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset, (long long)mMaxOffsetAppend);
 
     if (usePrefix) {
         addMultipleLengthPrefixedSamples_l(buffer);
@@ -1557,6 +1590,10 @@
         mOffset += buffer->range_length();
     }
     *bytesWritten = mOffset - old_offset;
+
+    ALOGV("mOffset:%lld, old_offset:%lld, bytesWritten:%lld", (long long)mOffset,
+          (long long)old_offset, (long long)*bytesWritten);
+
     return old_offset;
 }
 
@@ -1569,6 +1606,7 @@
         (const uint8_t *)buffer->data() + buffer->range_offset();
 
     if (!memcmp(ptr, "\x00\x00\x00\x01", 4)) {
+        ALOGV("stripping start code");
         buffer->set_range(
                 buffer->range_offset() + 4, buffer->range_length() - 4);
     }
@@ -1599,8 +1637,10 @@
 }
 
 void MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
+    ALOGV("alp:buffer->range_length:%lld", (long long)buffer->range_length());
     size_t length = buffer->range_length();
     if (mUse4ByteNalLength) {
+        ALOGV("mUse4ByteNalLength");
         uint8_t x[4];
         x[0] = length >> 24;
         x[1] = (length >> 16) & 0xff;
@@ -1610,6 +1650,7 @@
         writeOrPostError(mFd, (const uint8_t*)buffer->data() + buffer->range_offset(), length);
         mOffset += length + 4;
     } else {
+        ALOGV("mUse2ByteNalLength");
         CHECK_LT(length, 65536u);
 
         uint8_t x[2];
@@ -2762,6 +2803,9 @@
     }
 
     writeAllChunks();
+    ALOGV("threadFunc mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset,
+          (long long)mMaxOffsetAppend);
+    mOffset = std::max(mOffset, mMaxOffsetAppend);
 }
 
 status_t MPEG4Writer::startWriterThread() {
@@ -3323,6 +3367,7 @@
     uint32_t lastSamplesPerChunk = 0;
     int64_t lastSampleDurationUs = -1;      // Duration calculated from EOS buffer and its timestamp
     int64_t lastSampleDurationTicks = -1;   // Timescale based ticks
+    int64_t sampleFileOffset = -1;
 
     if (mIsAudio) {
         prctl(PR_SET_NAME, (unsigned long)"MP4WtrAudTrkThread", 0, 0, 0);
@@ -3342,6 +3387,7 @@
     MediaBufferBase *buffer;
     const char *trackName = getTrackType();
     while (!mDone && (err = mSource->read(&buffer)) == OK) {
+        ALOGV("read:buffer->range_length:%lld", (long long)buffer->range_length());
         int32_t isEOS = false;
         if (buffer->range_length() == 0) {
             if (buffer->meta_data().findInt32(kKeyIsEndOfStream, &isEOS) && isEOS) {
@@ -3448,6 +3494,14 @@
                 continue;
             }
         }
+        if (!buffer->meta_data().findInt64(kKeySampleFileOffset, &sampleFileOffset)) {
+            sampleFileOffset = -1;
+        }
+        int64_t lastSample = -1;
+        if (!buffer->meta_data().findInt64(kKeyLastSampleIndexInChunk, &lastSample)) {
+            lastSample = -1;
+        }
+        ALOGV("sampleFileOffset:%lld", (long long)sampleFileOffset);
 
         /*
          * Reserve space in the file for the current sample + to be written MOOV box. If reservation
@@ -3455,7 +3509,7 @@
          * write MOOV box successfully as space for the same was reserved in the prior call.
          * Release the current buffer/sample here.
          */
-        if (!mOwner->preAllocate(buffer->range_length())) {
+        if (sampleFileOffset == -1 && !mOwner->preAllocate(buffer->range_length())) {
             buffer->release();
             buffer = nullptr;
             break;
@@ -3466,9 +3520,14 @@
         // Make a deep copy of the MediaBuffer and Metadata and release
         // the original as soon as we can
         MediaBuffer *copy = new MediaBuffer(buffer->range_length());
-        memcpy(copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(),
-                buffer->range_length());
+        if (sampleFileOffset != -1) {
+            copy->meta_data().setInt64(kKeySampleFileOffset, sampleFileOffset);
+        } else {
+            memcpy(copy->data(), (uint8_t*)buffer->data() + buffer->range_offset(),
+                   buffer->range_length());
+        }
         copy->set_range(0, buffer->range_length());
+
         meta_data = new MetaData(buffer->meta_data());
         buffer->release();
         buffer = NULL;
@@ -3476,14 +3535,16 @@
             copy->meta_data().setInt32(kKeyExifTiffOffset, tiffHdrOffset);
         }
         bool usePrefix = this->usePrefix() && !isExif;
-
-        if (usePrefix) StripStartcode(copy);
-
+        if (sampleFileOffset == -1 && usePrefix) {
+            StripStartcode(copy);
+        }
         size_t sampleSize = copy->range_length();
-        if (usePrefix) {
+        if (sampleFileOffset == -1 && usePrefix) {
             if (mOwner->useNalLengthFour()) {
+                ALOGV("nallength4");
                 sampleSize += 4;
             } else {
+                ALOGV("nallength2");
                 sampleSize += 2;
             }
         }
@@ -3778,7 +3839,8 @@
                 chunkTimestampUs = timestampUs;
             } else {
                 int64_t chunkDurationUs = timestampUs - chunkTimestampUs;
-                if (chunkDurationUs > interleaveDurationUs) {
+                if (chunkDurationUs > interleaveDurationUs || lastSample > 1) {
+                    ALOGV("lastSample:%lld", (long long)lastSample);
                     if (chunkDurationUs > mMaxChunkDurationUs) {
                         mMaxChunkDurationUs = chunkDurationUs;
                     }
@@ -5331,4 +5393,4 @@
     endBox();
 }
 
-}  // namespace android
+}  // namespace android
\ No newline at end of file
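When a sample arrives with kKeySampleFileOffset set, addSample_l() above treats the payload as already present in the output file and only advances mOffset/mMaxOffsetAppend instead of writing bytes. A small sketch of the producer side of that contract (helper name and offsets are illustrative):

#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaDataBase.h>

// Wrap an existing on-disk sample so the writer only does offset bookkeeping for it.
MediaBuffer* wrapExistingSample(size_t length, off64_t fileOffset) {
    MediaBuffer* sample = new MediaBuffer(length);
    sample->set_range(0, length);
    sample->meta_data().setInt64(kKeySampleFileOffset, fileOffset);
    return sample;
}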
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
new file mode 100644
index 0000000..5d80b30
--- /dev/null
+++ b/media/libstagefright/MediaAppender.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaAppender"
+
+#include <media/stagefright/MediaAppender.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Log.h>
+// TODO : check if this works for NDK apps without JVM
+// #include <media/ndk/NdkJavaVMHelperPriv.h>
+
+namespace android {
+
+struct MediaAppender::sampleDataInfo {
+    size_t size;
+    int64_t time;
+    size_t exTrackIndex;
+    sp<MetaData> meta;
+};
+
+sp<MediaAppender> MediaAppender::create(int fd, AppendMode mode) {
+    if (fd < 0) {
+        ALOGE("invalid file descriptor");
+        return nullptr;
+    }
+    if (!(mode >= APPEND_MODE_FIRST && mode <= APPEND_MODE_LAST)) {
+        ALOGE("invalid mode %d", mode);
+        return nullptr;
+    }
+    sp<MediaAppender> ma = new (std::nothrow) MediaAppender(fd, mode);
+    if (ma->init() != OK) {
+        return nullptr;
+    }
+    return ma;
+}
+
+// TODO: inject mediamuxer and mediaextractor objects.
+// TODO: @format is not required as an input if we can sniff the file and find the format of
+//       the existing content.
+// TODO: Code it to the interface(MediaAppender), and have a separate MediaAppender NDK
+MediaAppender::MediaAppender(int fd, AppendMode mode)
+    : mFd(fd),
+      mMode(mode),
+      // TODO : check if this works for NDK apps without JVM
+      // mExtractor(new NuMediaExtractor(NdkJavaVMHelper::getJNIEnv() != nullptr
+      //           ? NuMediaExtractor::EntryPoint::NDK_WITH_JVM
+      //           : NuMediaExtractor::EntryPoint::NDK_NO_JVM)),
+      mExtractor(new (std::nothrow) NuMediaExtractor(NuMediaExtractor::EntryPoint::NDK_WITH_JVM)),
+      mTrackCount(0),
+      mState(UNINITIALIZED) {
+          ALOGV("MediaAppender::MediaAppender mode:%d", mode);
+      }
+
+status_t MediaAppender::init() {
+    std::scoped_lock lock(mMutex);
+    ALOGV("MediaAppender::init");
+    status_t status = mExtractor->setDataSource(mFd, 0, lseek(mFd, 0, SEEK_END));
+    if (status != OK) {
+        ALOGE("extractor_setDataSource failed, status :%d", status);
+        return status;
+    }
+
+    if (strcmp("MPEG4Extractor", mExtractor->getName()) == 0) {
+        mFormat = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
+    } else {
+        ALOGE("Unsupported format, extractor name:%s", mExtractor->getName());
+        return ERROR_UNSUPPORTED;
+    }
+
+    mTrackCount = mExtractor->countTracks();
+    ALOGV("mTrackCount:%zu", mTrackCount);
+    if (mTrackCount == 0) {
+        ALOGE("no tracks are present");
+        return ERROR_MALFORMED;
+    }
+    size_t exTrackIndex = 0;
+    ssize_t audioTrackIndex = -1, videoTrackIndex = -1;
+    bool audioSyncSampleTimeSet = false;
+
+    while (exTrackIndex < mTrackCount) {
+        sp<AMessage> fmt;
+        status = mExtractor->getTrackFormat(exTrackIndex, &fmt, 0);
+        if (status != OK) {
+            ALOGE("getTrackFormat failed for trackIndex:%zu, status:%d", exTrackIndex, status);
+            return status;
+        }
+        AString mime;
+        if (fmt->findString("mime", &mime)) {
+            if (!strncasecmp(mime.c_str(), "video/", 6)) {
+                ALOGV("VideoTrack");
+                if (videoTrackIndex != -1) {
+                    ALOGE("Not more than one video track is supported");
+                    return ERROR_UNSUPPORTED;
+                }
+                videoTrackIndex = exTrackIndex;
+            } else if (!strncasecmp(mime.c_str(), "audio/", 6)) {
+                ALOGV("AudioTrack");
+                if (audioTrackIndex != -1) {
+                    ALOGE("Not more than one audio track is supported");
+                }
+                audioTrackIndex = exTrackIndex;
+            } else {
+                ALOGV("Neither Video nor Audio track");
+            }
+        }
+        mFmtIndexMap.emplace(exTrackIndex, fmt);
+        mSampleCountVect.emplace_back(0);
+        mMaxTimestampVect.emplace_back(0);
+        mLastSyncSampleTimeVect.emplace_back(0);
+        status = mExtractor->selectTrack(exTrackIndex);
+        if (status != OK) {
+            ALOGE("selectTrack failed for trackIndex:%zu, status:%d", exTrackIndex, status);
+            return status;
+        }
+        ++exTrackIndex;
+    }
+
+    ALOGV("AudioTrackIndex:%zu, VideoTrackIndex:%zu", audioTrackIndex, videoTrackIndex);
+
+    do {
+        sampleDataInfo tmpSDI;
+        // TODO: read info into members of the struct sampleDataInfo directly
+        size_t sampleSize;
+        status = mExtractor->getSampleSize(&sampleSize);
+        if (status != OK) {
+            ALOGE("getSampleSize failed, status:%d", status);
+            return status;
+        }
+        mSampleSizeVect.emplace_back(sampleSize);
+        tmpSDI.size = sampleSize;
+        int64_t sampleTime = 0;
+        status = mExtractor->getSampleTime(&sampleTime);
+        if (status != OK) {
+            ALOGE("getSampleTime failed, status:%d", status);
+            return status;
+        }
+        mSampleTimeVect.emplace_back(sampleTime);
+        tmpSDI.time = sampleTime;
+        status = mExtractor->getSampleTrackIndex(&exTrackIndex);
+        if (status != OK) {
+            ALOGE("getSampleTrackIndex failed, status:%d", status);
+            return status;
+        }
+        mSampleIndexVect.emplace_back(exTrackIndex);
+        tmpSDI.exTrackIndex = exTrackIndex;
+        ++mSampleCountVect[exTrackIndex];
+        mMaxTimestampVect[exTrackIndex] = std::max(mMaxTimestampVect[exTrackIndex], sampleTime);
+        sp<MetaData> sampleMeta;
+        status = mExtractor->getSampleMeta(&sampleMeta);
+        if (status != OK) {
+            ALOGE("getSampleMeta failed, status:%d", status);
+            return status;
+        }
+        mSampleMetaVect.emplace_back(sampleMeta);
+        int32_t val = 0;
+        if (sampleMeta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+            mLastSyncSampleTimeVect[exTrackIndex] = sampleTime;
+        }
+        tmpSDI.meta = sampleMeta;
+        mSDI.emplace_back(tmpSDI);
+    } while (mExtractor->advance() == OK);
+
+    mExtractor.clear();
+
+    std::sort(mSDI.begin(), mSDI.end(), [](sampleDataInfo& a, sampleDataInfo& b) {
+        int64_t aOffset, bOffset;
+        a.meta->findInt64(kKeySampleFileOffset, &aOffset);
+        b.meta->findInt64(kKeySampleFileOffset, &bOffset);
+        return aOffset < bOffset;
+    });
+    for (int64_t syncSampleTime : mLastSyncSampleTimeVect) {
+        ALOGV("before ignoring frames, mLastSyncSampleTimeVect:%lld", (long long)syncSampleTime);
+    }
+    ALOGV("mMode:%u", mMode);
+    if (mMode == APPEND_MODE_IGNORE_LAST_VIDEO_GOP && videoTrackIndex != -1 ) {
+        ALOGV("Video track is present");
+        bool lastVideoIframe = false;
+        size_t lastVideoIframeOffset = 0;
+        int64_t lastVideoSampleTime = -1;
+        for (auto rItr = mSDI.rbegin(); rItr != mSDI.rend(); ++rItr) {
+            if (rItr->exTrackIndex != videoTrackIndex) {
+                continue;
+            }
+            if (lastVideoSampleTime == -1) {
+                lastVideoSampleTime = rItr->time;
+            }
+            int64_t offset = 0;
+            if (!rItr->meta->findInt64(kKeySampleFileOffset, &offset) || offset == 0) {
+                ALOGE("Missing offset");
+                return ERROR_MALFORMED;
+            }
+            ALOGV("offset:%lld", (long long)offset);
+            int32_t val = 0;
+            if (rItr->meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+                ALOGV("sampleTime:%lld", (long long)rItr->time);
+                ALOGV("lastVideoSampleTime:%lld", (long long)lastVideoSampleTime);
+                if (lastVideoIframe == false && (lastVideoSampleTime - rItr->time) >
+                                1000000/* Track interleaving duration in MPEG4Writer*/) {
+                    ALOGV("lastVideoIframe got chosen");
+                    lastVideoIframe = true;
+                    mLastSyncSampleTimeVect[videoTrackIndex] = rItr->time;
+                    lastVideoIframeOffset = offset;
+                    ALOGV("lastVideoIframeOffset:%lld", (long long)offset);
+                    break;
+                }
+            }
+        }
+        if (lastVideoIframe == false) {
+            ALOGV("Need to rewrite all samples");
+            mLastSyncSampleTimeVect[videoTrackIndex] = 0;
+            lastVideoIframeOffset = 0;
+        }
+        unsigned int framesIgnoredCount = 0;
+        for (auto itr = mSDI.begin(); itr != mSDI.end();) {
+            int64_t offset = 0;
+            ALOGV("trackIndex:%zu, %" PRId64 "", itr->exTrackIndex, itr->time);
+            if (itr->meta->findInt64(kKeySampleFileOffset, &offset) &&
+                                        offset >= lastVideoIframeOffset) {
+                ALOGV("offset:%lld", (long long)offset);
+                if (!audioSyncSampleTimeSet && audioTrackIndex != -1 &&
+                                            audioTrackIndex == itr->exTrackIndex) {
+                    mLastSyncSampleTimeVect[audioTrackIndex] = itr->time;
+                    audioSyncSampleTimeSet = true;
+                }
+                itr = mSDI.erase(itr);
+                ++framesIgnoredCount;
+            } else {
+                ++itr;
+            }
+        }
+        ALOGV("framesIgnoredCount:%u", framesIgnoredCount);
+    }
+
+    if (mMode == APPEND_MODE_IGNORE_LAST_VIDEO_GOP && videoTrackIndex == -1 &&
+                            audioTrackIndex != -1) {
+        ALOGV("Only AudioTrack is present");
+        for (auto rItr = mSDI.rbegin(); rItr != mSDI.rend();  ++rItr) {
+            int32_t val = 0;
+            if (rItr->meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+                    mLastSyncSampleTimeVect[audioTrackIndex] = rItr->time;
+                    break;
+            }
+        }
+        unsigned int framesIgnoredCount = 0;
+        for (auto itr = mSDI.begin(); itr != mSDI.end();) {
+            if (itr->time >= mLastSyncSampleTimeVect[audioTrackIndex]) {
+                itr = mSDI.erase(itr);
+                ++framesIgnoredCount;
+            } else {
+                ++itr;
+            }
+        }
+        ALOGV("framesIgnoredCount :%u", framesIgnoredCount);
+    }
+
+    for (size_t i = 0; i < mLastSyncSampleTimeVect.size(); ++i) {
+        ALOGV("mLastSyncSampleTimeVect[%zu]:%lld", i, (long long)mLastSyncSampleTimeVect[i]);
+        mFmtIndexMap[i]->setInt64(
+                "sample-time-before-append" /*AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND*/,
+                mLastSyncSampleTimeVect[i]);
+    }
+    for (size_t i = 0; i < mMaxTimestampVect.size(); ++i) {
+        ALOGV("mMaxTimestamp[%zu]:%lld", i, (long long)mMaxTimestampVect[i]);
+    }
+    for (size_t i = 0; i < mSampleCountVect.size(); ++i) {
+        ALOGV("SampleCountVect[%zu]:%zu", i, mSampleCountVect[i]);
+    }
+    mState = INITIALIZED;
+    return OK;
+}
+
+MediaAppender::~MediaAppender() {
+    ALOGV("MediaAppender::~MediaAppender");
+    mMuxer.clear();
+    mExtractor.clear();
+}
+
+status_t MediaAppender::start() {
+    std::scoped_lock lock(mMutex);
+    ALOGV("MediaAppender::start");
+    if (mState != INITIALIZED) {
+        ALOGE("MediaAppender::start() is called in invalid state %d", mState);
+        return INVALID_OPERATION;
+    }
+    mMuxer = new (std::nothrow) MediaMuxer(mFd, mFormat);
+    for (const auto& n : mFmtIndexMap) {
+        ssize_t muxIndex = mMuxer->addTrack(n.second);
+        if (muxIndex < 0) {
+            ALOGE("addTrack failed");
+            return UNKNOWN_ERROR;
+        }
+        mTrackIndexMap.emplace(n.first, muxIndex);
+    }
+    ALOGV("trackIndexmap size:%zu", mTrackIndexMap.size());
+
+    status_t status = mMuxer->start();
+    if (status != OK) {
+        ALOGE("muxer start failed:%d", status);
+        return status;
+    }
+
+    ALOGV("Sorting samples based on their offsets");
+    for (int i = 0; i < mSDI.size(); ++i) {
+        ALOGV("i:%d", i + 1);
+        /* TODO : Allocate a single allocation of the max size, and reuse it across ABuffers if
+         * using new ABuffer(void *, size_t).
+         */
+        sp<ABuffer> data = new (std::nothrow) ABuffer(mSDI[i].size);
+        if (data == nullptr) {
+            ALOGE("memory allocation failed");
+            return NO_MEMORY;
+        }
+        data->setRange(0, mSDI[i].size);
+        int32_t val = 0;
+        int sampleFlags = 0;
+        if (mSDI[i].meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+            sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+        }
+
+        int64_t val64;
+        if (mSDI[i].meta->findInt64(kKeySampleFileOffset, &val64)) {
+            ALOGV("SampleFileOffset Found :%zu:%lld:%lld", mSDI[i].exTrackIndex,
+                  (long long)mSampleCountVect[mSDI[i].exTrackIndex], (long long)val64);
+            sp<AMessage> bufMeta = data->meta();
+            bufMeta->setInt64("sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND*/,
+                              val64);
+        }
+        if (mSDI[i].meta->findInt64(kKeyLastSampleIndexInChunk, &val64)) {
+            ALOGV("kKeyLastSampleIndexInChunk Found %lld:%lld",
+                  (long long)mSampleCountVect[mSDI[i].exTrackIndex], (long long)val64);
+            sp<AMessage> bufMeta = data->meta();
+            bufMeta->setInt64(
+                    "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+                    val64);
+        }
+        status = mMuxer->writeSampleData(data, mTrackIndexMap[mSDI[i].exTrackIndex], mSDI[i].time,
+                                         sampleFlags);
+        if (status != OK) {
+            ALOGE("muxer writeSampleData failed:%d", status);
+            return status;
+        }
+    }
+    mState = STARTED;
+    return OK;
+}
+
+status_t MediaAppender::stop() {
+    std::scoped_lock lock(mMutex);
+    ALOGV("MediaAppender::stop");
+    if (mState == STARTED) {
+        status_t status = mMuxer->stop();
+        if (status != OK) {
+            mState = ERROR;
+        } else {
+            mState = STOPPED;
+        }
+        return status;
+    } else {
+        ALOGE("stop() is called in invalid state %d", mState);
+        return INVALID_OPERATION;
+    }
+}
+
+ssize_t MediaAppender::getTrackCount() {
+    std::scoped_lock lock(mMutex);
+    ALOGV("MediaAppender::getTrackCount");
+    if (mState != INITIALIZED && mState != STARTED) {
+        ALOGE("getTrackCount() is called in invalid state %d", mState);
+        return -1;
+    }
+    return mTrackCount;
+}
+
+sp<AMessage> MediaAppender::getTrackFormat(size_t idx) {
+    std::scoped_lock lock(mMutex);
+    ALOGV("MediaAppender::getTrackFormat");
+    if (mState != INITIALIZED && mState != STARTED) {
+        ALOGE("getTrackFormat() is called in invalid state %d", mState);
+        return nullptr;
+    }
+    if (idx < 0 || idx >= mTrackCount) {
+        ALOGE("getTrackFormat() idx is out of range");
+        return nullptr;
+    }
+    return mFmtIndexMap[idx];
+}
+
+status_t MediaAppender::writeSampleData(const sp<ABuffer>& buffer, size_t trackIndex,
+                                        int64_t timeUs, uint32_t flags) {
+    std::scoped_lock lock(mMutex);
+    ALOGV("writeSampleData:trackIndex:%zu, time:%" PRId64 "", trackIndex, timeUs);
+    return mMuxer->writeSampleData(buffer, trackIndex, timeUs, flags);
+}
+
+status_t MediaAppender::setOrientationHint([[maybe_unused]] int degrees) {
+    ALOGE("setOrientationHint not supported. Has to be called prior to start on initial muxer");
+    return ERROR_UNSUPPORTED;
+};
+
+status_t MediaAppender::setLocation([[maybe_unused]] int latit, [[maybe_unused]] int longit) {
+    ALOGE("setLocation not supported. Has to be called prior to start on initial muxer");
+    return ERROR_UNSUPPORTED;
+}
+
+ssize_t MediaAppender::addTrack([[maybe_unused]] const sp<AMessage> &format) {
+    ALOGE("addTrack not supported");
+    return ERROR_UNSUPPORTED;
+}
+
+}  // namespace android
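For context, a hedged end-to-end sketch of the call order the new class expects (create() already runs init(); the fd, track index, and sample plumbing here are placeholders, not AOSP helpers):

#include <media/stagefright/MediaAppender.h>

status_t appendOneSample(int fd, const sp<ABuffer>& sample, int64_t timeUs) {
    sp<MediaAppender> appender =
            MediaAppender::create(fd, MediaAppender::APPEND_MODE_IGNORE_LAST_VIDEO_GOP);
    if (appender == nullptr) return UNKNOWN_ERROR;
    status_t err = appender->start();   // adds recovered tracks and rewrites kept samples
    if (err != OK) return err;
    err = appender->writeSampleData(sample, 0 /* trackIndex */, timeUs, 0 /* flags */);
    appender->stop();                   // finalizes the muxer regardless of the write result
    return err;
}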
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 16cd5ca..c21ea8f 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -29,6 +29,7 @@
 #include <C2Buffer.h>
 
 #include "include/SoftwareRenderer.h"
+#include "PlaybackDurationAccumulator.h"
 
 #include <android/hardware/cas/native/1.0/IDescrambler.h>
 #include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
@@ -139,6 +140,8 @@
 static const char *kCodecRecentLatencyAvg = "android.media.mediacodec.recent.avg";      /* in us */
 static const char *kCodecRecentLatencyCount = "android.media.mediacodec.recent.n";
 static const char *kCodecRecentLatencyHist = "android.media.mediacodec.recent.hist";    /* in us */
+static const char *kCodecPlaybackDuration =
+        "android.media.mediacodec.playback-duration"; /* in sec */
 
 static const char *kCodecShapingEnhanced = "android.media.mediacodec.shaped";    /* 0/1 */
 
@@ -719,6 +722,8 @@
       mHaveInputSurface(false),
       mHavePendingInputBuffers(false),
       mCpuBoostRequested(false),
+      mPlaybackDurationAccumulator(new PlaybackDurationAccumulator()),
+      mIsSurfaceToScreen(false),
       mLatencyUnknown(0),
       mBytesEncoded(0),
       mEarliestEncodedPtsUs(INT64_MAX),
@@ -825,6 +830,10 @@
     if (mLatencyUnknown > 0) {
         mediametrics_setInt64(mMetricsHandle, kCodecLatencyUnknown, mLatencyUnknown);
     }
+    int64_t playbackDuration = mPlaybackDurationAccumulator->getDurationInSeconds();
+    if (playbackDuration > 0) {
+        mediametrics_setInt64(mMetricsHandle, kCodecPlaybackDuration, playbackDuration);
+    }
     if (mLifetimeStartNs > 0) {
         nsecs_t lifetime = systemTime(SYSTEM_TIME_MONOTONIC) - mLifetimeStartNs;
         lifetime = lifetime / (1000 * 1000);    // emitted in ms, truncated not rounded
@@ -962,6 +971,22 @@
     ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
 }
 
+void MediaCodec::updatePlaybackDuration(const sp<AMessage> &msg) {
+    if (msg->what() != kWhatOutputFramesRendered) {
+        ALOGE("updatePlaybackDuration: expected kWhatOuputFramesRendered (%d)", msg->what());
+        return;
+    }
+    // Playback duration only counts if the buffers are going to the screen.
+    if (!mIsSurfaceToScreen) {
+        return;
+    }
+    int64_t renderTimeNs;
+    size_t index = 0;
+    while (msg->findInt64(AStringPrintf("%zu-system-nano", index++).c_str(), &renderTimeNs)) {
+        mPlaybackDurationAccumulator->processRenderTime(renderTimeNs);
+    }
+}
+
 bool MediaCodec::Histogram::setup(int nbuckets, int64_t width, int64_t floor)
 {
     if (nbuckets <= 0 || width <= 0) {
@@ -1397,6 +1422,7 @@
  * MediaFormat Shaping forward declarations
  * including the property name we use for control.
  */
+static int enableMediaFormatShapingDefault = 1;
 static const char enableMediaFormatShapingProperty[] = "debug.stagefright.enableshaping";
 static void mapFormat(AString componentName, const sp<AMessage> &format, const char *kind,
                       bool reverse);
@@ -1472,7 +1498,8 @@
     }
 
     if (flags & CONFIGURE_FLAG_ENCODE) {
-        int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty, 0);
+        int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty,
+                                                 enableMediaFormatShapingDefault);
         if (!enableShaping) {
             ALOGI("format shaping disabled, property '%s'", enableMediaFormatShapingProperty);
         } else {
@@ -3122,6 +3149,7 @@
                     ALOGV("TunnelPeekState: %s -> %s",
                           asString(previousState),
                           asString(TunnelPeekState::kBufferRendered));
+                    updatePlaybackDuration(msg);
                     // check that we have a notification set
                     if (mOnFrameRenderedNotification != NULL) {
                         sp<AMessage> notify = mOnFrameRenderedNotification->dup();
@@ -4822,6 +4850,10 @@
             return ALREADY_EXISTS;
         }
 
+        // in case we don't connect, ensure that we don't signal the surface is
+        // connected to the screen
+        mIsSurfaceToScreen = false;
+
         err = nativeWindowConnect(surface.get(), "connectToSurface");
         if (err == OK) {
             // Require a fresh set of buffers after each connect by using a unique generation
@@ -4847,6 +4879,10 @@
             if (!mAllowFrameDroppingBySurface) {
                 disableLegacyBufferDropPostQ(surface);
             }
+            // keep track whether or not the buffers of the connected surface go to the screen
+            int result = 0;
+            surface->query(NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &result);
+            mIsSurfaceToScreen = result != 0;
         }
     }
     // do not return ALREADY_EXISTS unless surfaces are the same
@@ -4864,6 +4900,7 @@
         }
         // assume disconnected even on error
         mSurface.clear();
+        mIsSurfaceToScreen = false;
     }
     return err;
 }
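updatePlaybackDuration() above walks the "<index>-system-nano" entries that the kWhatOutputFramesRendered message carries, one per rendered frame, until findInt64 fails. A standalone sketch of that walk (logging only; assumes the stagefright foundation AMessage/AStringPrintf helpers):

#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
#include <utils/Log.h>

void logRenderTimes(const sp<AMessage>& msg) {
    int64_t renderTimeNs;
    size_t index = 0;
    while (msg->findInt64(AStringPrintf("%zu-system-nano", index++).c_str(), &renderTimeNs)) {
        ALOGV("frame %zu rendered at %lld ns", index - 1, (long long)renderTimeNs);
    }
}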
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 876d06c..0107c32 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -169,9 +169,7 @@
 }
 
 status_t MediaCodecSource::Puller::setStopTimeUs(int64_t stopTimeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSetStopTimeUs, this);
-    msg->setInt64("stop-time-us", stopTimeUs);
-    return postSynchronouslyAndReturnError(msg);
+    return mSource->setStopTimeUs(stopTimeUs);
 }
 
 status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta, const sp<AMessage> &notify) {
@@ -189,19 +187,11 @@
 }
 
 void MediaCodecSource::Puller::stop() {
-    bool interrupt = false;
-    {
-        // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
-        // stop.
-        Mutexed<Queue>::Locked queue(mQueue);
-        queue->mPulling = false;
-        interrupt = queue->mReadPendingSince && (queue->mReadPendingSince < ALooper::GetNowUs() - 1000000);
-        queue->flush(); // flush any unprocessed pulled buffers
-    }
-
-    if (interrupt) {
-        interruptSource();
-    }
+    // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
+    // stop.
+    Mutexed<Queue>::Locked queue(mQueue);
+    queue->mPulling = false;
+    queue->flush(); // flush any unprocessed pulled buffers
 }
 
 void MediaCodecSource::Puller::interruptSource() {
@@ -685,9 +675,9 @@
     if (mStopping && reachedEOS) {
         ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
         if (mPuller != NULL) {
-            mPuller->stopSource();
+            mPuller->interruptSource();
         }
-        ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
+        ALOGI("source (%s) stopped", mIsVideo ? "video" : "audio");
         // posting reply to everyone that's waiting
         List<sp<AReplyToken>>::iterator it;
         for (it = mStopReplyIDQueue.begin();
@@ -715,6 +705,9 @@
 status_t MediaCodecSource::feedEncoderInputBuffers() {
     MediaBufferBase* mbuf = NULL;
     while (!mAvailEncoderInputIndices.empty() && mPuller->readBuffer(&mbuf)) {
+        if (!mEncoder) {
+            return BAD_VALUE;
+        }
         size_t bufferIndex = *mAvailEncoderInputIndices.begin();
         mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
 
@@ -893,7 +886,7 @@
     {
         int32_t eos = 0;
         if (msg->findInt32("eos", &eos) && eos) {
-            ALOGV("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
+            ALOGI("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
             signalEOS();
             break;
         }
@@ -1111,12 +1104,7 @@
         if (generation != mGeneration) {
              break;
         }
-
-        if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
-            ALOGV("source (%s) stopping", mIsVideo ? "video" : "audio");
-            mPuller->interruptSource();
-            ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
-        }
+        ALOGD("source (%s) stopping stalled", mIsVideo ? "video" : "audio");
         signalEOS();
         break;
     }
@@ -1148,7 +1136,7 @@
         if (mFlags & FLAG_USE_SURFACE_INPUT) {
             sp<AMessage> params = new AMessage;
             params->setInt64(PARAMETER_KEY_OFFSET_TIME, mInputBufferTimeOffsetUs);
-            err = mEncoder->setParameters(params);
+            err = mEncoder ? mEncoder->setParameters(params) : BAD_VALUE;
         }
 
         sp<AMessage> response = new AMessage;
@@ -1168,7 +1156,7 @@
         if (mFlags & FLAG_USE_SURFACE_INPUT) {
             sp<AMessage> params = new AMessage;
             params->setInt64("stop-time-us", stopTimeUs);
-            err = mEncoder->setParameters(params);
+            err = mEncoder ? mEncoder->setParameters(params) : BAD_VALUE;
         } else {
             err = mPuller->setStopTimeUs(stopTimeUs);
         }
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index c91386d..a946f71 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -76,6 +76,7 @@
     mFileMeta.clear();
     mWriter.clear();
     mTrackList.clear();
+    mFormatList.clear();
 }
 
 ssize_t MediaMuxer::addTrack(const sp<AMessage> &format) {
@@ -109,6 +110,8 @@
             ALOGW("addTrack() setCaptureRate failed :%d", result);
         }
     }
+
+    mFormatList.add(format);
     return mTrackList.add(newTrack);
 }
 
@@ -224,9 +227,42 @@
         ALOGV("BUFFER_FLAG_EOS");
     }
 
+    sp<AMessage> bufMeta = buffer->meta();
+    int64_t val64;
+    if (bufMeta->findInt64("sample-file-offset", &val64)) {
+        sampleMetaData.setInt64(kKeySampleFileOffset, val64);
+    }
+    if (bufMeta->findInt64(
+                "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+                &val64)) {
+        sampleMetaData.setInt64(kKeyLastSampleIndexInChunk, val64);
+    }
+
     sp<MediaAdapter> currentTrack = mTrackList[trackIndex];
     // This pushBuffer will wait until the mediaBuffer is consumed.
     return currentTrack->pushBuffer(mediaBuffer);
 }
 
+ssize_t MediaMuxer::getTrackCount() {
+    Mutex::Autolock autoLock(mMuxerLock);
+    if (mState != INITIALIZED && mState != STARTED) {
+        ALOGE("getTrackCount() must be called either in INITIALIZED or STARTED state");
+        return -1;
+    }
+    return mTrackList.size();
+}
+
+sp<AMessage> MediaMuxer::getTrackFormat([[maybe_unused]] size_t idx) {
+    Mutex::Autolock autoLock(mMuxerLock);
+    if (mState != INITIALIZED && mState != STARTED) {
+        ALOGE("getTrackFormat() must be called either in INITIALIZED or STARTED state");
+        return nullptr;
+    }
+    if (idx < 0 || idx >= mFormatList.size()) {
+        ALOGE("getTrackFormat() idx is out of range");
+        return nullptr;
+    }
+    return mFormatList[idx];
+}
+
 }  // namespace android
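The new block above copies two optional hints from the ABuffer's AMessage meta into the MediaBuffer handed to the writer. A caller-side sketch of attaching them (the key strings match the literals used above; the values are illustrative):

#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>

void tagAppendSample(const sp<ABuffer>& buffer, int64_t fileOffset, int64_t lastSampleIndex) {
    sp<AMessage> meta = buffer->meta();
    meta->setInt64("sample-file-offset", fileOffset);
    meta->setInt64("last-sample-index-in-chunk", lastSampleIndex);
}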
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 24ba38a..2447f5e 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -133,6 +133,14 @@
         if (format->mFormat->findInt64("target-time", &val64)) {
             meta.setInt64(kKeyTargetTime, val64);
         }
+        if (format->mFormat->findInt64("sample-file-offset", &val64)) {
+            meta.setInt64(kKeySampleFileOffset, val64);
+        }
+        if (format->mFormat->findInt64(
+                    "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+                    &val64)) {
+            meta.setInt64(kKeyLastSampleIndexInChunk, val64);
+        }
         int32_t val32;
         if (format->mFormat->findInt32("is-sync-frame", &val32)) {
             meta.setInt32(kKeyIsSyncFrame, val32);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index f2c7dd6..f0383b5 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -189,6 +189,11 @@
     return err;
 }
 
+const char* NuMediaExtractor::getName() const {
+    Mutex::Autolock autoLock(mLock);
+    return mImpl == nullptr ? nullptr : mImpl->name().string();
+}
+
 static String8 arrayToString(const std::vector<uint8_t> &array) {
     String8 result;
     for (size_t i = 0; i < array.size(); i++) {
diff --git a/media/libstagefright/OWNERS b/media/libstagefright/OWNERS
index 819389d..0cc2294 100644
--- a/media/libstagefright/OWNERS
+++ b/media/libstagefright/OWNERS
@@ -4,4 +4,8 @@
 lajos@google.com
 marcone@google.com
 taklee@google.com
-wonsik@google.com
\ No newline at end of file
+wonsik@google.com
+
+# LON
+olly@google.com
+andrewlewis@google.com
diff --git a/media/libstagefright/PlaybackDurationAccumulator.h b/media/libstagefright/PlaybackDurationAccumulator.h
new file mode 100644
index 0000000..cb5f0c4
--- /dev/null
+++ b/media/libstagefright/PlaybackDurationAccumulator.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PLAYBACK_DURATION_ACCUMULATOR_H_
+#define PLAYBACK_DURATION_ACCUMULATOR_H_
+
+namespace android {
+
+// Accumulates playback duration by processing render times of individual frames and by ignoring
+// frames rendered during inactive playbacks such as seeking, pausing, or re-buffering.
+class PlaybackDurationAccumulator {
+private:
+    // Controls the maximum delta between render times before considering the playback is not
+    // active and has stalled.
+    static const int64_t MAX_PRESENTATION_DURATION_NS = 500 * 1000 * 1000;
+
+public:
+    PlaybackDurationAccumulator() {
+        mPlaybackDurationNs = 0;
+        mPreviousRenderTimeNs = 0;
+    }
+
+    // Process a render time expressed in nanoseconds.
+    void processRenderTime(int64_t newRenderTimeNs) {
+        // If we detect wrap-around or out of order frames, just ignore the duration for this
+        // and the next frame.
+        if (newRenderTimeNs < mPreviousRenderTimeNs) {
+            mPreviousRenderTimeNs = 0;
+        }
+        if (mPreviousRenderTimeNs > 0) {
+            int64_t presentationDurationNs = newRenderTimeNs - mPreviousRenderTimeNs;
+            if (presentationDurationNs < MAX_PRESENTATION_DURATION_NS) {
+                mPlaybackDurationNs += presentationDurationNs;
+            }
+        }
+        mPreviousRenderTimeNs = newRenderTimeNs;
+    }
+
+    int64_t getDurationInSeconds() {
+        return mPlaybackDurationNs / 1000 / 1000 / 1000; // Nanoseconds to seconds.
+    }
+
+private:
+    // The playback duration accumulated so far.
+    int64_t mPlaybackDurationNs;
+    // The previous render time used to compute the next presentation duration.
+    int64_t mPreviousRenderTimeNs;
+};
+
+}
+
+#endif
+
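A minimal usage sketch of the accumulator, assuming the header above is on the include path (timestamps fabricated; it only shows that a gap above the 500 ms threshold is excluded and that the result truncates to whole seconds):

#include "PlaybackDurationAccumulator.h"

int64_t exampleDuration() {
    android::PlaybackDurationAccumulator acc;
    acc.processRenderTime(1'000'000'000);   // seeds the previous render time
    acc.processRenderTime(1'400'000'000);   // +0.4 s
    acc.processRenderTime(1'800'000'000);   // +0.4 s
    acc.processRenderTime(3'800'000'000);   // 2 s gap: treated as a stall, ignored
    acc.processRenderTime(4'200'000'000);   // +0.4 s
    return acc.getDurationInSeconds();      // 1 (1.2 s truncated to whole seconds)
}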
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 5ede871..04a9b17 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -725,16 +725,19 @@
     }
 };
 
-static std::vector<std::pair<const char *, uint32_t>> int64Mappings {
+static std::vector<std::pair<const char*, uint32_t>> int64Mappings {
     {
-        { "exif-offset", kKeyExifOffset },
-        { "exif-size", kKeyExifSize },
-        { "xmp-offset", kKeyXmpOffset },
-        { "xmp-size", kKeyXmpSize },
-        { "target-time", kKeyTargetTime },
-        { "thumbnail-time", kKeyThumbnailTime },
-        { "timeUs", kKeyTime },
-        { "durationUs", kKeyDuration },
+        { "exif-offset", kKeyExifOffset},
+        { "exif-size", kKeyExifSize},
+        { "xmp-offset", kKeyXmpOffset},
+        { "xmp-size", kKeyXmpSize},
+        { "target-time", kKeyTargetTime},
+        { "thumbnail-time", kKeyThumbnailTime},
+        { "timeUs", kKeyTime},
+        { "durationUs", kKeyDuration},
+        { "sample-file-offset", kKeySampleFileOffset},
+        { "last-sample-index-in-chunk", kKeyLastSampleIndexInChunk},
+        { "sample-time-before-append", kKeySampleTimeBeforeAppend},
     }
 };
 
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 2582ed0..7f2728e 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -106,6 +106,7 @@
     off64_t mOffset;
     off64_t mPreAllocateFileEndOffset;  //End of file offset during preallocation.
     off64_t mMdatOffset;
+    off64_t mMaxOffsetAppend; // File offset written upto while appending.
     off64_t mMdatEndOffset;  // End offset of mdat atom.
     uint8_t *mInMemoryCache;
     off64_t mInMemoryCacheOffset;
diff --git a/media/libstagefright/include/media/stagefright/MediaAppender.h b/media/libstagefright/include/media/stagefright/MediaAppender.h
new file mode 100644
index 0000000..c2f6f10
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaAppender.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_APPENDER_H
+#define ANDROID_MEDIA_APPENDER_H
+
+#include <media/stagefright/MediaMuxer.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <stack>
+
+namespace android {
+
+struct MediaAppender : public MediaMuxerBase {
+public:
+    enum AppendMode {
+        APPEND_MODE_FIRST = 0,
+        APPEND_MODE_IGNORE_LAST_VIDEO_GOP = APPEND_MODE_FIRST,
+        APPEND_MODE_ADD_TO_EXISTING_DATA = 1,
+        APPEND_MODE_LAST = APPEND_MODE_ADD_TO_EXISTING_DATA,
+    };
+
+    static sp<MediaAppender> create(int fd, AppendMode mode);
+
+    virtual ~MediaAppender();
+
+    status_t init();
+
+    status_t start();
+
+    status_t stop();
+
+    status_t writeSampleData(const sp<ABuffer>& buffer, size_t trackIndex, int64_t timeUs,
+                             uint32_t flags);
+
+    status_t setOrientationHint(int degrees);
+
+    status_t setLocation(int latitude, int longitude);
+
+    ssize_t addTrack(const sp<AMessage> &format);
+
+    ssize_t getTrackCount();
+
+    sp<AMessage> getTrackFormat(size_t idx);
+
+private:
+    MediaAppender(int fd, AppendMode mode);
+
+    int mFd;
+    MediaMuxer::OutputFormat mFormat;
+    AppendMode mMode;
+    sp<NuMediaExtractor> mExtractor;
+    sp<MediaMuxer> mMuxer;
+    size_t mTrackCount;
+    // Map track index given by extractor to the ones received from muxer.
+    std::map<size_t, ssize_t> mTrackIndexMap;
+    // Count of the samples in each track, indexed by extractor track ids.
+    std::vector<size_t> mSampleCountVect;
+    // Extractor track index of samples.
+    std::vector<size_t> mSampleIndexVect;
+    // Track format indexed by extractor track ids.
+    std::map<size_t, sp<AMessage>> mFmtIndexMap;
+    // Size of samples.
+    std::vector<size_t> mSampleSizeVect;
+    // Presentation time stamp of samples.
+    std::vector<int64_t> mSampleTimeVect;
+    // Timestamp of last sample of tracks.
+    std::vector<int64_t> mMaxTimestampVect;
+    // Metadata of samples.
+    std::vector<sp<MetaData>> mSampleMetaVect;
+    std::mutex mMutex;
+    // Timestamp of the last sync sample of tracks.
+    std::vector<int64_t> mLastSyncSampleTimeVect;
+
+    struct sampleDataInfo;
+    std::vector<sampleDataInfo> mSDI;
+
+    enum : int {
+        UNINITIALIZED,
+        INITIALIZED,
+        STARTED,
+        STOPPED,
+        ERROR,
+    } mState GUARDED_BY(mMutex);
+};
+
+}  // namespace android
+#endif  // ANDROID_MEDIA_APPENDER_H
\ No newline at end of file
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 3f93e6d..d7b1794 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -58,6 +58,7 @@
 struct PersistentSurface;
 class SoftwareRenderer;
 class Surface;
+class PlaybackDurationAccumulator;
 namespace hardware {
 namespace cas {
 namespace native {
@@ -413,6 +414,7 @@
     void updateLowLatency(const sp<AMessage> &msg);
     constexpr const char *asString(TunnelPeekState state, const char *default_string="?");
     void updateTunnelPeek(const sp<AMessage> &msg);
+    void updatePlaybackDuration(const sp<AMessage> &msg);
 
     sp<AMessage> mOutputFormat;
     sp<AMessage> mInputFormat;
@@ -480,6 +482,9 @@
 
     std::shared_ptr<BufferChannelBase> mBufferChannel;
 
+    PlaybackDurationAccumulator * mPlaybackDurationAccumulator;
+    bool mIsSurfaceToScreen;
+
     MediaCodec(
             const sp<ALooper> &looper, pid_t pid, uid_t uid,
             std::function<sp<CodecBase>(const AString &, const char *)> getCodecBase = nullptr,
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index a1b9465..e97a65e 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -22,7 +22,12 @@
 #include <utils/Vector.h>
 #include <utils/threads.h>
 
+#include <map>
+#include <mutex>
+#include <vector>
+
 #include "media/stagefright/foundation/ABase.h"
+#include "MediaMuxerBase.h"
 
 namespace android {
 
@@ -33,6 +38,7 @@
 struct MediaSource;
 class MetaData;
 struct MediaWriter;
+struct NuMediaExtractor;
 
 // MediaMuxer is used to mux multiple tracks into a video. Currently, we only
 // support a mp4 file as the output.
@@ -40,19 +46,8 @@
 // Constructor -> addTrack+ -> start -> writeSampleData+ -> stop
 // If muxing operation need to be cancelled, the app is responsible for
 // deleting the output file after stop.
-struct MediaMuxer : public RefBase {
+struct MediaMuxer : public MediaMuxerBase {
 public:
-    // Please update media/java/android/media/MediaMuxer.java if the
-    // OutputFormat is updated.
-    enum OutputFormat {
-        OUTPUT_FORMAT_MPEG_4      = 0,
-        OUTPUT_FORMAT_WEBM        = 1,
-        OUTPUT_FORMAT_THREE_GPP   = 2,
-        OUTPUT_FORMAT_HEIF        = 3,
-        OUTPUT_FORMAT_OGG         = 4,
-        OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
-    };
-
     // Construct the muxer with the file descriptor. Note that the MediaMuxer
     // will close this file at stop().
     MediaMuxer(int fd, OutputFormat format);
@@ -117,10 +112,25 @@
     status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
                              int64_t timeUs, uint32_t flags) ;
 
+    /**
+     * Gets the number of tracks added successfully. Should be called in
+     * the INITIALIZED (after the constructor) or STARTED (after start()) state.
+     * @return the number of tracks, or -1 if called in the wrong state.
+     */
+    ssize_t getTrackCount();
+
+    /**
+     * Gets the format of the track with the given index.
+     * @param idx index of the track whose format is wanted.
+     * @return smart pointer to an AMessage containing the format details.
+     */
+    sp<AMessage> getTrackFormat(size_t idx);
+
 private:
     const OutputFormat mFormat;
     sp<MediaWriter> mWriter;
     Vector< sp<MediaAdapter> > mTrackList;  // Each track has its MediaAdapter.
+    Vector< sp<AMessage> > mFormatList; // Format of each track.
     sp<MetaData> mFileMeta;  // Metadata for the whole file.
     Mutex mMuxerLock;
 
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxerBase.h b/media/libstagefright/include/media/stagefright/MediaMuxerBase.h
new file mode 100644
index 0000000..f02d510
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaMuxerBase.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_MUXER_BASE_H_
+#define MEDIA_MUXER_BASE_H_
+
+#include <utils/RefBase.h>
+#include "media/stagefright/foundation/ABase.h"
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+
+// MediaMuxerBase is the interface for muxing multiple tracks into a video.
+// Currently, we only support an MP4 file as the output.
+// The expected calling order of the functions is:
+// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop
+// If the muxing operation needs to be cancelled, the app is responsible for
+// deleting the output file after stop.
+struct MediaMuxerBase : public RefBase {
+public:
+    // Please update media/java/android/media/MediaMuxer.java if the
+    // OutputFormat is updated.
+    enum OutputFormat {
+        OUTPUT_FORMAT_MPEG_4      = 0,
+        OUTPUT_FORMAT_WEBM        = 1,
+        OUTPUT_FORMAT_THREE_GPP   = 2,
+        OUTPUT_FORMAT_HEIF        = 3,
+        OUTPUT_FORMAT_OGG         = 4,
+        OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
+    };
+
+    // Default constructor. Concrete muxers are constructed with a file
+    // descriptor and close it at stop().
+    MediaMuxerBase() {}
+
+    virtual ~MediaMuxerBase() {}
+
+    /**
+     * Add a track with its format information. This should be
+     * called before start().
+     * @param format the track's format.
+     * @return the track's index, or a negative number on error.
+     */
+    virtual ssize_t addTrack(const sp<AMessage> &format) = 0;
+
+    /**
+     * Start muxing. Make sure all the tracks have been added before
+     * calling this.
+     */
+    virtual status_t start() = 0;
+
+    /**
+     * Set the orientation hint.
+     * @param degrees The rotation degrees. It has to be either 0,
+     *                90, 180 or 270.
+     * @return OK if no error.
+     */
+    virtual status_t setOrientationHint(int degrees) = 0;
+
+    /**
+     * Set the location.
+     * @param latitude The latitude in degrees x 10000. Its value must be in the range
+     * [-900000, 900000].
+     * @param longitude The longitude in degrees x 10000. Its value must be in the range
+     * [-1800000, 1800000].
+     * @return OK if no error.
+     */
+    virtual status_t setLocation(int latitude, int longitude) = 0;
+
+    /**
+     * Stop muxing.
+     * This method is a blocking call. Depending on how much data is
+     * buffered internally, stopping the muxer may take a long time,
+     * so calling it from the UI thread is not recommended.
+     * @return OK if no error.
+     */
+    virtual status_t stop() = 0;
+
+    /**
+     * Send a sample buffer for muxing.
+     * The buffer can be reused once this method returns. Typically,
+     * this function does not block for very long, so there is no need
+     * to call it from a separate thread just to push a buffer.
+     * @param buffer the incoming sample buffer.
+     * @param trackIndex the buffer's track index number.
+     * @param timeUs the buffer's time stamp.
+     * @param flags the only supported flag for now is
+     *              MediaCodec::BUFFER_FLAG_SYNCFRAME.
+     * @return OK if no error.
+     */
+    virtual status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
+                             int64_t timeUs, uint32_t flags) = 0;
+
+    /**
+     * Gets the number of tracks added successfully. Should be called in
+     * the INITIALIZED (after the constructor) or STARTED (after start()) state.
+     * @return the number of tracks, or -1 if called in the wrong state.
+     */
+    virtual ssize_t getTrackCount() = 0;
+
+    /**
+     * Gets the format of the track with the given index.
+     * @param idx index of the track whose format is wanted.
+     * @return smart pointer to an AMessage containing the format details.
+     */
+    virtual sp<AMessage> getTrackFormat(size_t idx) = 0;
+
+private:
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaMuxerBase);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_MUXER_BASE_H_
+
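To make the documented call order concrete, here is a brief sketch (not part of the patch) using the existing MediaMuxer implementation of this interface; the track format passed to addTrack() is assumed to come from an encoder or extractor, and the helper name is illustrative only.

```
#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop, as documented above.
status_t muxOneSample(int fd, const sp<AMessage> &trackFormat,
                      const sp<ABuffer> &sample, int64_t timeUs) {
    sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
    ssize_t trackIdx = muxer->addTrack(trackFormat);  // must precede start()
    if (trackIdx < 0) {
        return static_cast<status_t>(trackIdx);
    }
    status_t err = muxer->start();
    if (err != OK) {
        return err;
    }
    err = muxer->writeSampleData(sample, trackIdx, timeUs, 0 /* flags */);
    status_t stopErr = muxer->stop();  // blocking; also closes fd
    return err != OK ? err : stopErr;
}
```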
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 940bd86..408872f 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -264,6 +264,11 @@
     // Slow-motion markers
     kKeySlowMotionMarkers = 'slmo', // raw data, byte array following spec for
                                     // MediaFormat#KEY_SLOW_MOTION_MARKERS
+
+    kKeySampleFileOffset = 'sfof', // int64_t, sample's offset in a media file.
+    kKeyLastSampleIndexInChunk = 'lsic',  // int64_t, index of the last sample in a chunk.
+    kKeySampleTimeBeforeAppend = 'lsba', // int64_t, timestamp of the last sample of a track.
+
 };
 
 enum {
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index d8f2b00..6aa7c0f 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -100,6 +100,10 @@
 
     status_t getAudioPresentations(size_t trackIdx, AudioPresentationCollection *presentations);
 
+    status_t setPlaybackId(const String8& playbackId);
+
+    const char* getName() const;
+
 protected:
     virtual ~NuMediaExtractor();
 
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 0c65e9e..07fc5de 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -419,6 +419,7 @@
 
 EXPORT
 media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex, AMediaFormat *fmt) {
+    ALOGV("AMediaExtractor_getSampleFormat");
     if (fmt == NULL) {
         return AMEDIA_ERROR_INVALID_PARAMETER;
     }
@@ -428,6 +429,9 @@
     if (err != OK) {
         return translate_error(err);
     }
+#ifdef LOG_NDEBUG
+    sampleMeta->dumpToLog();
+#endif
 
     sp<AMessage> meta;
     AMediaFormat_getFormat(fmt, &meta);
@@ -483,6 +487,19 @@
         meta->setBuffer(AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO, audioPresentationsData);
     }
 
+    int64_t val64;
+    if (sampleMeta->findInt64(kKeySampleFileOffset, &val64)) {
+        meta->setInt64("sample-file-offset", val64);
+        ALOGV("SampleFileOffset Found");
+    }
+    if (sampleMeta->findInt64(kKeyLastSampleIndexInChunk, &val64)) {
+        meta->setInt64("last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+                       val64);
+        ALOGV("kKeyLastSampleIndexInChunk Found");
+    }
+
+    ALOGV("AMediaFormat_toString:%s", AMediaFormat_toString(fmt));
+
     return AMEDIA_OK;
 }
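For reference, a short sketch (not part of the patch) of how an NDK client might read the two per-sample entries populated above; the string keys are the literal values used in this change, the getter is the existing AMediaFormat_getInt64, and the helper name is hypothetical.

```
#include <media/NdkMediaExtractor.h>
#include <media/NdkMediaFormat.h>

// Hypothetical helper: query the new per-sample entries for the current sample.
static media_status_t readSampleOffsets(AMediaExtractor *ex, AMediaFormat *fmt) {
    media_status_t err = AMediaExtractor_getSampleFormat(ex, fmt);
    if (err != AMEDIA_OK) {
        return err;
    }
    int64_t fileOffset = 0;
    if (AMediaFormat_getInt64(fmt, "sample-file-offset", &fileOffset)) {
        // Byte offset of the current sample within the media file.
    }
    int64_t lastIndexInChunk = 0;
    if (AMediaFormat_getInt64(fmt, "last-sample-index-in-chunk", &lastIndexInChunk)) {
        // Index of the last sample in the chunk containing the current sample.
    }
    return AMEDIA_OK;
}
```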
 
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 1773023..c1793ce 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -334,6 +334,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME = "is-sync-frame";
 EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
 EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK = "last-sample-index-in-chunk";
 EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
 EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
 EXPORT const char* AMEDIAFORMAT_KEY_LOCATION = "location";
@@ -359,7 +360,9 @@
 EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
 EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
 EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
+EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET = "sample-file-offset";
 EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND = "sample-time-before-append";
 EXPORT const char* AMEDIAFORMAT_KEY_SAR_HEIGHT = "sar-height";
 EXPORT const char* AMEDIAFORMAT_KEY_SAR_WIDTH = "sar-width";
 EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index d1992bf..1965e62 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -17,28 +17,24 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NdkMediaMuxer"
 
-
-#include <media/NdkMediaMuxer.h>
+#include <android_util_Binder.h>
+#include <jni.h>
+#include <media/IMediaHTTPService.h>
 #include <media/NdkMediaCodec.h>
 #include <media/NdkMediaErrorPriv.h>
 #include <media/NdkMediaFormatPriv.h>
-
-
-#include <utils/Log.h>
-#include <utils/StrongPointer.h>
+#include <media/NdkMediaMuxer.h>
+#include <media/stagefright/MediaAppender.h>
+#include <media/stagefright/MediaMuxer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaMuxer.h>
-#include <media/IMediaHTTPService.h>
-#include <android_util_Binder.h>
-
-#include <jni.h>
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
 
 using namespace android;
 
 struct AMediaMuxer {
-    sp<MediaMuxer> mImpl;
-
+    sp<MediaMuxerBase> mImpl;
 };
 
 extern "C" {
@@ -46,8 +42,15 @@
 EXPORT
 AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format) {
     ALOGV("ctor");
-    AMediaMuxer *mData = new AMediaMuxer();
-    mData->mImpl = new MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+    AMediaMuxer *mData = new (std::nothrow) AMediaMuxer();
+    if (mData == nullptr) {
+        return nullptr;
+    }
+    mData->mImpl = new (std::nothrow) MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+    if (mData->mImpl == nullptr) {
+        delete mData;
+        return nullptr;
+    }
     return mData;
 }
 
@@ -94,6 +97,34 @@
             muxer->mImpl->writeSampleData(buf, trackIdx, info->presentationTimeUs, info->flags));
 }
 
+EXPORT
+AMediaMuxer* AMediaMuxer_append(int fd, AppendMode mode) {
+    ALOGV("append");
+    AMediaMuxer* mData = new (std::nothrow) AMediaMuxer();
+    if (mData == nullptr) {
+        return nullptr;
+    }
+    mData->mImpl = MediaAppender::create(fd, (android::MediaAppender::AppendMode)mode);
+    if (mData->mImpl == nullptr) {
+        delete mData;
+        return nullptr;
+    }
+    return mData;
+}
+
+EXPORT
+ssize_t AMediaMuxer_getTrackCount(AMediaMuxer* muxer) {
+    return muxer->mImpl->getTrackCount();
+}
+
+EXPORT
+AMediaFormat* AMediaMuxer_getTrackFormat(AMediaMuxer* muxer, size_t idx) {
+    sp<AMessage> format = muxer->mImpl->getTrackFormat(idx);
+    if (format != nullptr) {
+        return AMediaFormat_fromMsg(&format);
+    }
+    return nullptr;
+}
 
 } // extern "C"
 
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 476bbd9..fbd855d 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -307,6 +307,9 @@
 extern const char* AMEDIAFORMAT_KEY_THUMBNAIL_CSD_AV1C __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_KEY_XMP_OFFSET __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_KEY_XMP_SIZE __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND __INTRODUCED_IN(31);
 
 extern const char* AMEDIAFORMAT_VIDEO_QP_B_MAX __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_VIDEO_QP_B_MIN __INTRODUCED_IN(31);
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 519e249..866ebfd 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -54,6 +54,17 @@
     AMEDIAMUXER_OUTPUT_FORMAT_THREE_GPP   = 2,
 } OutputFormat;
 
+typedef enum {
+    /* The last group of pictures (GOP) of the video track can be incomplete, so it is
+     * safest to discard it and rewrite.  If both audio and video tracks are present in
+     * a file, audio samples following the last video GOP are discarded too.
+     * If only an audio track is present, no samples are discarded.
+     */
+    AMEDIAMUXER_APPEND_IGNORE_LAST_VIDEO_GOP = 0,
+    // Keep all existing samples as they are, and only append new samples after them.
+    AMEDIAMUXER_APPEND_TO_EXISTING_DATA = 1,
+} AppendMode;
+
 /**
  * Create new media muxer.
  *
@@ -138,6 +149,41 @@
         size_t trackIdx, const uint8_t *data,
         const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
 
+/**
+ * Creates a new media muxer for appending data to an existing MPEG4 file.
+ * This is a synchronous API call and could take a while to return if the existing file is large.
+ * Works only for MPEG4 files that contain (a) a single audio track, (b) a single video track,
+ * or (c) a single audio and a single video track.
+ * @param fd The file descriptor; it must be opened with read and write permission. This call
+ * does not take ownership of the fd, i.e. the caller is responsible for closing it.
+ * @param mode An AppendMode value specifying how data is appended.
+ * @return Pointer to an AMediaMuxer if the file (fd) already has tracks, otherwise nullptr.
+ * {@link AMediaMuxer_delete} should be used to free the returned pointer.
+ *
+ * Available since API level 31.
+ */
+AMediaMuxer* AMediaMuxer_append(int fd, AppendMode mode) __INTRODUCED_IN(31);
+
+/**
+ * Returns the number of tracks added in the file passed to {@link AMediaMuxer_new} or
+ * the number of existing tracks in the file passed to {@link AMediaMuxer_append}.
+ * Should be called in INITIALIZED or STARTED state, otherwise returns -1.
+ *
+ * Available since API level 31.
+ */
+ssize_t AMediaMuxer_getTrackCount(AMediaMuxer*) __INTRODUCED_IN(31);
+
+/**
+ * Returns AMediaFormat of the added track with index idx in the file passed to
+ * {@link AMediaMuxer_new} or the AMediaFormat of the existing track with index idx
+ * in the file passed to {@link AMediaMuxer_append}.
+ * Should be called in INITIALIZED or STARTED state, otherwise returns nullptr.
+ * {@link AMediaFormat_delete} should be used to free the returned pointer.
+ *
+ * Available since API level 31.
+ */
+AMediaFormat* AMediaMuxer_getTrackFormat(AMediaMuxer* muxer, size_t idx) __INTRODUCED_IN(31);
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_MUXER_H
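A minimal sketch (not part of the patch) of the new append flow declared above; the helper name is illustrative, and error handling is reduced to the essentials.

```
#include <media/NdkMediaFormat.h>
#include <media/NdkMediaMuxer.h>

// Hypothetical helper: open an existing MP4 for appending and inspect its tracks.
static media_status_t inspectForAppend(int fd) {
    AMediaMuxer *muxer = AMediaMuxer_append(fd, AMEDIAMUXER_APPEND_IGNORE_LAST_VIDEO_GOP);
    if (muxer == nullptr) {
        return AMEDIA_ERROR_UNKNOWN;  // fd has no tracks or could not be parsed
    }
    ssize_t numTracks = AMediaMuxer_getTrackCount(muxer);
    for (ssize_t i = 0; i < numTracks; ++i) {
        AMediaFormat *fmt = AMediaMuxer_getTrackFormat(muxer, i);
        if (fmt != nullptr) {
            // Inspect the existing track, e.g. via AMediaFormat_toString(fmt).
            AMediaFormat_delete(fmt);
        }
    }
    // New samples would then be written with AMediaMuxer_start(),
    // AMediaMuxer_writeSampleData() and AMediaMuxer_stop().
    AMediaMuxer_delete(muxer);
    return AMEDIA_OK;
}
```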
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index eead681..7e9e57e 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -109,6 +109,7 @@
     AMEDIAFORMAT_KEY_IS_SYNC_FRAME; # var introduced=29
     AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
     AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
+    AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK; # var introduced=31
     AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
     AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
@@ -134,6 +135,8 @@
     AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
     AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
     AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
+    AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET; # var introduced=31
+    AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND; # var introduced=31
     AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_SAR_HEIGHT; # var introduced=29
     AMEDIAFORMAT_KEY_SAR_WIDTH; # var introduced=29
@@ -286,7 +289,10 @@
     AMediaFormat_setString;
     AMediaFormat_toString;
     AMediaMuxer_addTrack;
+    AMediaMuxer_append; # introduced=31
     AMediaMuxer_delete;
+    AMediaMuxer_getTrackCount; # introduced=31
+    AMediaMuxer_getTrackFormat; # introduced=31
     AMediaMuxer_new;
     AMediaMuxer_setLocation;
     AMediaMuxer_setOrientationHint;
diff --git a/media/tests/SampleVideoEncoder/README.md b/media/tests/SampleVideoEncoder/README.md
index 074c939..2e275c5 100644
--- a/media/tests/SampleVideoEncoder/README.md
+++ b/media/tests/SampleVideoEncoder/README.md
@@ -2,7 +2,7 @@
 
 This is a sample android application for encoding AVC/HEVC streams with B-Frames enabled. It uses MediaRecorder APIs to record B-frames enabled video from camera2 input and MediaCodec APIs to encode reference test vector using input surface.
 
-This page describes how to get started with the Encoder App.
+This page describes how to get started with the Encoder App and how to run the tests for it.
 
 
 # Getting Started
@@ -33,6 +33,17 @@
 
 After installing the app, a TextureView showing camera preview is dispalyed on one third of the screen. It also features checkboxes to select either avc/hevc and hw/sw codecs. It also has an option to select either MediaRecorder APIs or MediaCodec, along with the 'Start' button to start/stop recording.
 
+# Running Tests
+
+The app also contains a test that exercises the MediaCodec APIs for encoding AVC/HEVC streams with B-frames enabled. The test does not require the application UI.
+
+## Running the tests using atest
+Note that the atest command will install the SampleVideoEncoder app on the device.
+
+Command to run the tests:
+```
+atest SampleVideoEncoder
+```
 
 # Ouput
 
@@ -40,3 +51,6 @@
 ```
 /storage/emulated/0/Android/data/com.android.media.samplevideoencoder/files/
 ```
+
+After encoding with the MediaCodec APIs completes, the total number of I-frames, P-frames and B-frames is displayed on the screen.
+The test results can be obtained from the logcat output of the test.
diff --git a/media/tests/SampleVideoEncoder/app/Android.bp b/media/tests/SampleVideoEncoder/app/Android.bp
index 3a66955..58b219b 100644
--- a/media/tests/SampleVideoEncoder/app/Android.bp
+++ b/media/tests/SampleVideoEncoder/app/Android.bp
@@ -23,7 +23,7 @@
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
-android_app {
+android_test {
     name: "SampleVideoEncoder",
 
     manifest: "src/main/AndroidManifest.xml",
@@ -41,6 +41,10 @@
         "androidx.annotation_annotation",
         "androidx.appcompat_appcompat",
         "androidx-constraintlayout_constraintlayout",
+        "junit",
+        "androidx.test.core",
+        "androidx.test.runner",
+        "hamcrest-library",
     ],
 
     javacflags: [
diff --git a/media/tests/SampleVideoEncoder/app/AndroidTest.xml b/media/tests/SampleVideoEncoder/app/AndroidTest.xml
new file mode 100644
index 0000000..91f4304
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/AndroidTest.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Runs SampleVideoEncoder Tests">
+    <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+        <option name="cleanup-apks" value="false" />
+        <option name="test-file-name" value="SampleVideoEncoder.apk" />
+    </target_preparer>
+
+    <option name="test-tag" value="SampleVideoEncoder" />
+    <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+        <option name="package" value="com.android.media.samplevideoencoder" />
+        <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+        <option name="hidden-api-checks" value="false"/>
+    </test>
+</configuration>
diff --git a/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java b/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java
new file mode 100644
index 0000000..1ef332e
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.media.samplevideoencoder.tests;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import android.content.Context;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import com.android.media.samplevideoencoder.MediaCodecSurfaceEncoder;
+import com.android.media.samplevideoencoder.R;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertThat;
+
+@RunWith(Parameterized.class)
+public class SampleVideoEncoderTest {
+    private static final String TAG = SampleVideoEncoderTest.class.getSimpleName();
+    private final Context mContext;
+    private int mMaxBFrames;
+    private int mInputResId;
+    private String mMime;
+    private boolean mIsSoftwareEncoder;
+
+    @Parameterized.Parameters
+    public static Collection<Object[]> inputFiles() {
+        return Arrays.asList(new Object[][]{
+                // Parameters: MimeType, isSoftwareEncoder, maxBFrames
+                {MediaFormat.MIMETYPE_VIDEO_AVC, false, 1},
+                {MediaFormat.MIMETYPE_VIDEO_AVC, true, 1},
+                {MediaFormat.MIMETYPE_VIDEO_HEVC, false, 1},
+                {MediaFormat.MIMETYPE_VIDEO_HEVC, true, 1}});
+    }
+
+    public SampleVideoEncoderTest(String mimeType, boolean isSoftwareEncoder, int maxBFrames) {
+        this.mContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
+        this.mInputResId = R.raw.crowd_1920x1080_25fps_4000kbps_h265;
+        this.mMime = mimeType;
+        this.mIsSoftwareEncoder = isSoftwareEncoder;
+        this.mMaxBFrames = maxBFrames;
+    }
+
+    private String getOutputPath() {
+        File dir = mContext.getExternalFilesDir(null);
+        if (dir == null) {
+            Log.e(TAG, "Cannot get external directory path to save output video");
+            return null;
+        }
+        String videoPath = dir.getAbsolutePath() + "/Video-" + System.currentTimeMillis() + ".mp4";
+        Log.i(TAG, "Output video is saved at: " + videoPath);
+        return videoPath;
+    }
+
+    @Test
+    public void testMediaSurfaceEncoder() throws IOException, InterruptedException {
+        String outputFilePath = getOutputPath();
+        MediaCodecSurfaceEncoder surfaceEncoder =
+                new MediaCodecSurfaceEncoder(mContext, mInputResId, mMime, mIsSoftwareEncoder,
+                        outputFilePath, mMaxBFrames);
+        int encodingStatus = surfaceEncoder.startEncodingSurface();
+        assertThat(encodingStatus, is(equalTo(0)));
+        int[] frameNumArray = surfaceEncoder.getFrameTypes();
+        Log.i(TAG, "Results: I-Frames: " + frameNumArray[0] + "; P-Frames: " + frameNumArray[1] +
+                "\n " + "; B-Frames:" + frameNumArray[2]);
+        assertNotEquals("Encoder mime: " + mMime + " isSoftware: " + mIsSoftwareEncoder +
+                " failed to generate B Frames", frameNumArray[2], 0);
+    }
+}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
index ed668bb..b17541d 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
@@ -38,4 +38,8 @@
         </activity>
     </application>
 
+    <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+        android:targetPackage="com.android.media.samplevideoencoder"
+        android:label="SampleVideoEncoder Test"/>
+
 </manifest>
\ No newline at end of file
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
index 33e81bb..a7a353c 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
@@ -56,6 +56,7 @@
 import android.util.Log;
 import android.util.Size;
 import android.widget.RadioGroup;
+import android.widget.TextView;
 import android.widget.Toast;
 
 import java.lang.ref.WeakReference;
@@ -80,6 +81,14 @@
     private static final int VIDEO_BITRATE = 8000000 /* 8 Mbps */;
     private static final int VIDEO_FRAMERATE = 30;
 
+    /**
+     * The constant values assigned to frame types here are internal to this app.
+     * They do not correspond to the actual values defined in the AVC/HEVC specifications.
+     */
+    public static final int FRAME_TYPE_I = 0;
+    public static final int FRAME_TYPE_P = 1;
+    public static final int FRAME_TYPE_B = 2;
+
     private String mMime = MediaFormat.MIMETYPE_VIDEO_AVC;
     private String mOutputVideoPath = null;
 
@@ -89,6 +98,7 @@
     private boolean mIsRecording;
 
     private AutoFitTextureView mTextureView;
+    private TextView mTextView;
     private CameraDevice mCameraDevice;
     private CameraCaptureSession mPreviewSession;
     private CaptureRequest.Builder mPreviewBuilder;
@@ -101,6 +111,8 @@
 
     private Button mStartButton;
 
+    private int[] mFrameTypeOccurrences;
+
     @Override
     protected void onCreate(Bundle savedInstanceState) {
         super.onCreate(savedInstanceState);
@@ -129,6 +141,8 @@
         final CheckBox checkBox_mr = findViewById(R.id.checkBox_media_recorder);
         final CheckBox checkBox_mc = findViewById(R.id.checkBox_media_codec);
         mTextureView = findViewById(R.id.texture);
+        mTextView = findViewById(R.id.textViewResults);
+
         checkBox_mr.setOnClickListener(new View.OnClickListener() {
             @Override
             public void onClick(View v) {
@@ -162,6 +176,7 @@
     @Override
     public void onClick(View v) {
         if (v.getId() == R.id.start_button) {
+            mTextView.setText(null);
             if (mIsMediaRecorder) {
                 if (mIsRecording) {
                     stopRecordingVideo();
@@ -198,6 +213,7 @@
                             mainActivity.mOutputVideoPath);
             try {
                 encodingStatus = codecSurfaceEncoder.startEncodingSurface();
+                mainActivity.mFrameTypeOccurrences = codecSurfaceEncoder.getFrameTypes();
             } catch (IOException | InterruptedException e) {
                 e.printStackTrace();
             }
@@ -211,6 +227,13 @@
             if (encodingStatus == 0) {
                 Toast.makeText(mainActivity.getApplicationContext(), "Encoding Completed",
                         Toast.LENGTH_SHORT).show();
+                mainActivity.mTextView.append("\n Encoded stream contains: ");
+                mainActivity.mTextView.append("\n Number of I-Frames: " +
+                        mainActivity.mFrameTypeOccurrences[FRAME_TYPE_I]);
+                mainActivity.mTextView.append("\n Number of P-Frames: " +
+                        mainActivity.mFrameTypeOccurrences[FRAME_TYPE_P]);
+                mainActivity.mTextView.append("\n Number of B-Frames: " +
+                        mainActivity.mFrameTypeOccurrences[FRAME_TYPE_B]);
             } else {
                 Toast.makeText(mainActivity.getApplicationContext(),
                         "Error occurred while " + "encoding", Toast.LENGTH_SHORT).show();
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
index 146a475..011c38c 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
@@ -31,10 +31,14 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
+
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_B;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_I;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_P;
 
 public class MediaCodecSurfaceEncoder {
     private static final String TAG = MediaCodecSurfaceEncoder.class.getSimpleName();
-
     private static final boolean DEBUG = false;
     private static final int VIDEO_BITRATE = 8000000  /*8 Mbps*/;
     private static final int VIDEO_FRAMERATE = 30;
@@ -44,6 +48,8 @@
     private final String mMime;
     private final String mOutputPath;
     private int mTrackID = -1;
+    private int mFrameNum = 0;
+    private int[] mFrameTypeOccurrences = {0, 0, 0};
 
     private Surface mSurface;
     private MediaExtractor mExtractor;
@@ -128,8 +134,10 @@
             mEncoder.reset();
             mSurface.release();
             mSurface = null;
+            Log.i(TAG, "Number of I-frames = " + mFrameTypeOccurrences[FRAME_TYPE_I]);
+            Log.i(TAG, "Number of P-frames = " + mFrameTypeOccurrences[FRAME_TYPE_P]);
+            Log.i(TAG, "Number of B-frames = " + mFrameTypeOccurrences[FRAME_TYPE_B]);
         }
-
         mEncoder.release();
         mDecoder.release();
         mExtractor.release();
@@ -193,6 +201,8 @@
         mSawEncOutputEOS = false;
         mDecOutputCount = 0;
         mEncOutputCount = 0;
+        mFrameNum = 0;
+        Arrays.fill(mFrameTypeOccurrences, 0);
     }
 
     private void configureCodec(MediaFormat decFormat, MediaFormat encFormat) {
@@ -336,6 +346,21 @@
         }
         if (info.size > 0) {
             ByteBuffer buf = mEncoder.getOutputBuffer(bufferIndex);
+            // Parse the buffer to get the frame type
+            if (DEBUG) Log.d(TAG, "[ Frame : " + (mFrameNum++) + " ]");
+            int frameTypeResult = -1;
+            if (MediaFormat.MIMETYPE_VIDEO_AVC.equals(mMime)) {
+                frameTypeResult = NalUnitUtil.getStandardizedFrameTypesFromAVC(buf);
+            } else if (MediaFormat.MIMETYPE_VIDEO_HEVC.equals(mMime)) {
+                frameTypeResult = NalUnitUtil.getStandardizedFrameTypesFromHEVC(buf);
+            } else {
+                Log.e(TAG, "Mime type " + mMime + " is not supported.");
+                return;
+            }
+            if (frameTypeResult != -1) {
+                mFrameTypeOccurrences[frameTypeResult]++;
+            }
+
             if (mMuxer != null) {
                 if (mTrackID == -1) {
                     mTrackID = mMuxer.addTrack(mEncoder.getOutputFormat());
@@ -353,4 +378,8 @@
     private boolean hasSeenError() {
         return mAsyncHandleDecoder.hasSeenError() || mAsyncHandleEncoder.hasSeenError();
     }
+
+    public int[] getFrameTypes() {
+        return mFrameTypeOccurrences;
+    }
 }
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java
new file mode 100644
index 0000000..efff4fd
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.samplevideoencoder;
+
+import android.util.Log;
+
+import java.nio.ByteBuffer;
+
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_B;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_I;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_P;
+
+public class NalUnitUtil {
+    private static final String TAG = NalUnitUtil.class.getSimpleName();
+    private static final boolean DEBUG = false;
+
+    public static int findNalUnit(byte[] dataArray, int pos, int limit) {
+        int startOffset = 0;
+        if (limit - pos < 4) {
+            return startOffset;
+        }
+        if (dataArray[pos] == 0 && dataArray[pos + 1] == 0 && dataArray[pos + 2] == 1) {
+            startOffset = 3;
+        } else {
+            if (dataArray[pos] == 0 && dataArray[pos + 1] == 0 && dataArray[pos + 2] == 0 &&
+                    dataArray[pos + 3] == 1) {
+                startOffset = 4;
+            }
+        }
+        return startOffset;
+    }
+
+    private static int getAVCNalUnitType(byte[] dataArray, int nalUnitOffset) {
+        return dataArray[nalUnitOffset] & 0x1F;
+    }
+
+    private static int parseAVCNALUnitData(byte[] dataArray, int offset, int limit) {
+        ParsableBitArray bitArray = new ParsableBitArray(dataArray);
+        bitArray.reset(dataArray, offset, limit);
+
+        bitArray.skipBit(); // forbidden_zero_bit
+        bitArray.readBits(2); // nal_ref_idc
+        bitArray.skipBits(5); // nal_unit_type
+
+        bitArray.readUEV(); // first_mb_in_slice
+        if (!bitArray.canReadUEV()) {
+            return -1;
+        }
+        int sliceType = bitArray.readUEV();
+        if (DEBUG) Log.d(TAG, "slice_type = " + sliceType);
+        if (sliceType == 0) {
+            return FRAME_TYPE_P;
+        } else if (sliceType == 1) {
+            return FRAME_TYPE_B;
+        } else if (sliceType == 2) {
+            return FRAME_TYPE_I;
+        } else {
+            return -1;
+        }
+    }
+
+    private static int getHEVCNalUnitType(byte[] dataArray, int nalUnitOffset) {
+        return (dataArray[nalUnitOffset] & 0x7E) >> 1;
+    }
+
+    private static int parseHEVCNALUnitData(byte[] dataArray, int offset, int limit,
+                                            int nalUnitType) {
+        // nal_unit_type values from H.265/HEVC Table 7-1.
+        final int BLA_W_LP = 16;
+        final int RSV_IRAP_VCL23 = 23;
+
+        ParsableBitArray bitArray = new ParsableBitArray(dataArray);
+        bitArray.reset(dataArray, offset, limit);
+
+        bitArray.skipBit(); // forbidden zero bit
+        bitArray.readBits(6); // nal_unit_header
+        bitArray.readBits(6); // nuh_layer_id
+        bitArray.readBits(3); // nuh_temporal_id_plus1
+
+        // Parsing slice_segment_header values from H.265/HEVC Table 7.3.6.1
+        boolean first_slice_segment = bitArray.readBit(); // first_slice_segment_in_pic_flag
+        if (!first_slice_segment) return -1;
+        if (nalUnitType >= BLA_W_LP && nalUnitType <= RSV_IRAP_VCL23) {
+            bitArray.readBit();  // no_output_of_prior_pics_flag
+        }
+        bitArray.readUEV(); // slice_pic_parameter_set_id
+        // Assume num_extra_slice_header_bits element of PPS data to be 0
+        int sliceType = bitArray.readUEV();
+        if (DEBUG) Log.d(TAG, "slice_type = " + sliceType);
+        if (sliceType == 0) {
+            return FRAME_TYPE_B;
+        } else if (sliceType == 1) {
+            return FRAME_TYPE_P;
+        } else if (sliceType == 2) {
+            return FRAME_TYPE_I;
+        } else {
+            return -1;
+        }
+    }
+
+    public static int getStandardizedFrameTypesFromAVC(ByteBuffer buf) {
+        int limit = buf.limit();
+        byte[] dataArray = new byte[buf.remaining()];
+        buf.get(dataArray);
+        int frameType = -1;
+        for (int pos = 0; pos + 3 < limit; ) {
+            int startOffset = NalUnitUtil.findNalUnit(dataArray, pos, limit);
+            if (startOffset != 0) {
+                int nalUnitType = getAVCNalUnitType(dataArray, (pos + startOffset));
+                if (DEBUG) {
+                    Log.d(TAG, "NalUnitOffset = " + (pos + startOffset));
+                    Log.d(TAG, "NalUnitType = " + nalUnitType);
+                }
+                // SLICE_NAL = 1; IDR_SLICE_NAL = 5
+                if (nalUnitType == 1 || nalUnitType == 5) {
+                    frameType = parseAVCNALUnitData(dataArray, (pos + startOffset),
+                            (limit - pos - startOffset));
+                    break;
+                }
+                pos += 3;
+            } else {
+                pos++;
+            }
+        }
+        return frameType;
+    }
+
+    public static int getStandardizedFrameTypesFromHEVC(ByteBuffer buf) {
+        int limit = buf.limit();
+        byte[] dataArray = new byte[buf.remaining()];
+        buf.get(dataArray);
+        int frameType = -1;
+        for (int pos = 0; pos + 3 < limit; ) {
+            int startOffset = NalUnitUtil.findNalUnit(dataArray, pos, limit);
+            if (startOffset != 0) {
+                int nalUnitType = NalUnitUtil.getHEVCNalUnitType(dataArray, (pos + startOffset));
+                if (DEBUG) {
+                    Log.d(TAG, "NalUnitOffset = " + (pos + startOffset));
+                    Log.d(TAG, "NalUnitType = " + nalUnitType);
+                }
+                // Parse NAL units containing slice headers, whose types lie in the range 0 to 21
+                if (nalUnitType >= 0 && nalUnitType <= 21) {
+                    frameType = parseHEVCNALUnitData(dataArray, (pos + startOffset),
+                            (limit - pos - startOffset), nalUnitType);
+                    break;
+                }
+                pos += 3;
+            } else {
+                pos++;
+            }
+        }
+        return frameType;
+    }
+}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java
new file mode 100644
index 0000000..e4bfaa3
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.samplevideoencoder;
+
+public class ParsableBitArray {
+    public byte[] data;
+    private int byteOffset;
+    private int bitOffset;
+    private int byteLimit;
+
+    public ParsableBitArray(byte[] dataArray) {
+        this(dataArray, dataArray.length);
+    }
+
+    public ParsableBitArray(byte[] dataArray, int limit) {
+        this.data = dataArray;
+        byteLimit = limit;
+    }
+
+    public void reset(byte[] data, int offset, int limit) {
+        this.data = data;
+        byteOffset = offset;
+        bitOffset = 0;
+        byteLimit = limit;
+    }
+
+    public void skipBit() {
+        if (++bitOffset == 8) {
+            bitOffset = 0;
+            byteOffset++;
+        }
+    }
+
+    public void skipBits(int numBits) {
+        int numBytes = numBits / 8;
+        byteOffset += numBytes;
+        bitOffset += numBits - (numBytes * 8);
+        if (bitOffset > 7) {
+            byteOffset++;
+            bitOffset -= 8;
+        }
+    }
+
+    public boolean readBit() {
+        boolean returnValue = (data[byteOffset] & (0x80 >> bitOffset)) != 0;
+        skipBit();
+        return returnValue;
+    }
+
+    public int readBits(int numBits) {
+        if (numBits == 0) {
+            return 0;
+        }
+        int returnValue = 0;
+        bitOffset += numBits;
+        while (bitOffset > 8) {
+            bitOffset -= 8;
+            returnValue |= (data[byteOffset++] & 0xFF) << bitOffset;
+        }
+        returnValue |= (data[byteOffset] & 0xFF) >> (8 - bitOffset);
+        returnValue &= 0xFFFFFFFF >>> (32 - numBits);
+        if (bitOffset == 8) {
+            bitOffset = 0;
+            byteOffset++;
+        }
+        return returnValue;
+    }
+
+    public boolean canReadUEV() {
+        int initialByteOffset = byteOffset;
+        int initialBitOffset = bitOffset;
+        int leadingZeros = 0;
+        while (byteOffset < byteLimit && !readBit()) {
+            leadingZeros++;
+        }
+        boolean hitLimit = byteOffset == byteLimit;
+        byteOffset = initialByteOffset;
+        bitOffset = initialBitOffset;
+        return !hitLimit && canReadBits(leadingZeros * 2 + 1);
+    }
+
+    public int readUEV() {
+        int leadingZeros = 0;
+        while (!readBit()) {
+            leadingZeros++;
+        }
+        return (1 << leadingZeros) - 1 + (leadingZeros > 0 ? readBits(leadingZeros) : 0);
+    }
+
+    public boolean canReadBits(int numBits) {
+        int oldByteOffset = byteOffset;
+        int numBytes = numBits / 8;
+        int newByteOffset = byteOffset + numBytes;
+        int newBitOffset = bitOffset + numBits - (numBytes * 8);
+        if (newBitOffset > 7) {
+            newByteOffset++;
+            newBitOffset -= 8;
+        }
+        for (int i = oldByteOffset + 1; i <= newByteOffset && newByteOffset < byteLimit; i++) {
+            if (shouldSkipByte(i)) {
+                // Skip the byte and check three bytes ahead.
+                newByteOffset++;
+                i += 2;
+            }
+        }
+        return newByteOffset < byteLimit || (newByteOffset == byteLimit && newBitOffset == 0);
+    }
+
+    private boolean shouldSkipByte(int offset) {
+        return (2 <= offset && offset < byteLimit && data[offset] == (byte) 0x03 &&
+                data[offset - 2] == (byte) 0x00 && data[offset - 1] == (byte) 0x00);
+    }
+
+}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml b/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
index 164e02a..017012d 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
@@ -124,4 +124,15 @@
 
     </FrameLayout>
 
+    <TextView
+        android:id="@+id/textViewResults"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_marginTop="10dp"
+        android:fontFamily="sans-serif-medium"
+        android:textSize="18sp"
+        android:textStyle="normal"
+        app:layout_constraintStart_toStartOf="parent"
+        app:layout_constraintTop_toBottomOf="@+id/frameLayout2" />
+
 </androidx.constraintlayout.widget.ConstraintLayout>
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 26cdc3a..52dc0cf 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -37,6 +37,8 @@
     ],
     static_libs: [
         "libc_malloc_debug_backtrace",
+        "libbatterystats_aidl",
+        "libprocessinfoservice_aidl",
     ],
     shared_libs: [
         "libaudioclient_aidl_conversion",
@@ -50,6 +52,9 @@
         "android.hidl.token@1.0-utils",
         "media_permission-aidl-cpp",
     ],
+    export_static_lib_headers: [
+        "libbatterystats_aidl",
+    ],
 
     logtags: ["EventLogTags.logtags"],
 
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index 19225d3..e212794 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -21,9 +21,9 @@
 #include <media/stagefright/ProcessInfo.h>
 
 #include <binder/IPCThreadState.h>
-#include <binder/IProcessInfoService.h>
 #include <binder/IServiceManager.h>
 #include <private/android_filesystem_config.h>
+#include <processinfo/IProcessInfoService.h>
 
 namespace android {
 
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index 187ef7c..b245834 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -10,6 +10,7 @@
 cc_defaults {
     name: "libmediautils_fuzzer_defaults",
     shared_libs: [
+        "libbatterystats_aidl",
         "libbinder",
         "libcutils",
         "liblog",
diff --git a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
index 4521853..130feee 100644
--- a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
+++ b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 #define LOG_TAG "BatteryNotifierFuzzer"
-#include <binder/IBatteryStats.h>
+#include <batterystats/IBatteryStats.h>
 #include <binder/IServiceManager.h>
 #include <utils/String16.h>
 #include <android/log.h>
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index a4e42ad..3812d7a 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -17,7 +17,7 @@
 #ifndef MEDIA_BATTERY_NOTIFIER_H
 #define MEDIA_BATTERY_NOTIFIER_H
 
-#include <binder/IBatteryStats.h>
+#include <batterystats/IBatteryStats.h>
 #include <utils/Singleton.h>
 #include <utils/String8.h>
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7a89805..7cdac30 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -278,6 +278,21 @@
   return NO_ERROR;
 }
 
+status_t AudioFlinger::setVibratorInfos(
+        const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+    Mutex::Autolock _l(mLock);
+    mAudioVibratorInfos = vibratorInfos;
+    return NO_ERROR;
+}
+
+// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
+const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
+    if (mAudioVibratorInfos.empty()) {
+        return nullptr;
+    }
+    return &mAudioVibratorInfos.front();
+}
+
 AudioFlinger::~AudioFlinger()
 {
     while (!mRecordThreads.isEmpty()) {
@@ -4122,7 +4137,8 @@
         case TransactionCode::SET_MIC_MUTE:
         case TransactionCode::SET_LOW_RAM_DEVICE:
         case TransactionCode::SYSTEM_READY:
-        case TransactionCode::SET_AUDIO_HAL_PIDS: {
+        case TransactionCode::SET_AUDIO_HAL_PIDS:
+        case TransactionCode::SET_VIBRATOR_INFOS: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1cfdffc..a980752 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -267,6 +267,8 @@
 
     virtual status_t setAudioHalPids(const std::vector<pid_t>& pids);
 
+    virtual status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+
     status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
 
     // end of IAudioFlinger interface
@@ -296,6 +298,8 @@
     void updateDownStreamPatches_l(const struct audio_patch *patch,
                                    const std::set<audio_io_handle_t> streams);
 
+    const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+
 private:
     // FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
     static const size_t kLogMemorySize = 400 * 1024;
@@ -971,6 +975,8 @@
     SimpleLog  mAppSetParameterLog;
     SimpleLog  mSystemSetParameterLog;
 
+    std::vector<media::AudioVibratorInfo> mAudioVibratorInfos;
+
     static inline constexpr const char *mMetricsId = AMEDIAMETRICS_KEY_AUDIO_FLINGER;
 };
 
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 7e06096..d8565bd 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -173,22 +173,15 @@
     return status;
 }
 
-audio_format_t AudioStreamOut::getFormat() const
+audio_config_base_t AudioStreamOut::getAudioProperties() const
 {
-    audio_format_t result;
-    return stream->getFormat(&result) == OK ? result : AUDIO_FORMAT_INVALID;
-}
-
-uint32_t AudioStreamOut::getSampleRate() const
-{
-    uint32_t result;
-    return stream->getSampleRate(&result) == OK ? result : 0;
-}
-
-audio_channel_mask_t AudioStreamOut::getChannelMask() const
-{
-    audio_channel_mask_t result;
-    return stream->getChannelMask(&result) == OK ? result : AUDIO_CHANNEL_INVALID;
+    audio_config_base_t result = AUDIO_CONFIG_BASE_INITIALIZER;
+    if (stream->getAudioProperties(&result) != OK) {
+        result.sample_rate = 0;
+        result.channel_mask = AUDIO_CHANNEL_INVALID;
+        result.format = AUDIO_FORMAT_INVALID;
+    }
+    return result;
 }
 
 int AudioStreamOut::flush()
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 16fbcf2..565f43a 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -81,22 +81,14 @@
     virtual size_t getFrameSize() const { return mHalFrameSize; }
 
     /**
-     * @return format from the perspective of the application and the AudioFlinger.
+     * @return audio stream configuration: channel mask, format, sample rate:
+     *   - channel mask from the perspective of the application and the AudioFlinger;
+     *     the HAL is in stereo mode when playing multi-channel compressed audio over HDMI;
+     *   - format from the perspective of the application and the AudioFlinger;
+     *   - sample rate from the perspective of the application and the AudioFlinger;
+     *     the HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
      */
-    virtual audio_format_t getFormat() const;
-
-    /**
-     * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
-     * @return sample rate from the perspective of the application and the AudioFlinger.
-     */
-    virtual uint32_t getSampleRate() const;
-
-    /**
-     * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
-     * @return channel mask from the perspective of the application and the AudioFlinger.
-     */
-    virtual audio_channel_mask_t getChannelMask() const;
-
+    virtual audio_config_base_t getAudioProperties() const;
 
     virtual status_t flush();
     virtual status_t standby();
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 031e0cf..d75b13b 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1585,6 +1585,34 @@
     return status;
 }
 
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+{
+    if (mStatus != NO_ERROR) {
+        return mStatus;
+    }
+    if (!isHapticGenerator()) {
+        ALOGW("Should not set vibrator info for effects that are not HapticGenerator");
+        return INVALID_OPERATION;
+    }
+
+    std::vector<uint8_t> request(
+            sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+    effect_param_t *param = (effect_param_t*) request.data();
+    param->psize = sizeof(int32_t);
+    param->vsize = 2 * sizeof(float);
+    *(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
+    float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
+    vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
+    vibratorInfoPtr[1] = vibratorInfo->qFactor;
+    std::vector<uint8_t> response;
+    status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
+    if (status == NO_ERROR) {
+        LOG_ALWAYS_FATAL_IF(response.size() != sizeof(status_t));
+        status = *reinterpret_cast<const status_t*>(response.data());
+    }
+    return status;
+}
+
 static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
     std::stringstream ss;
 
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 8e82d53..9da95bc 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -258,6 +258,7 @@
     bool             isHapticGenerator() const;
 
     status_t         setHapticIntensity(int id, int intensity);
+    status_t         setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
 
     void             dump(int fd, const Vector<String16>& args);
 
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 2e59baa..2436248 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -219,6 +219,10 @@
     void flushAck();
     bool isResumePending();
     void resumeAck();
+    // For direct or offloaded tracks ensure that the pause state is acknowledged
+    // by the playback thread in case of an immediate flush.
+    bool isPausePending() const { return mPauseHwPending; }
+    void pauseAck();
     void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
             uint32_t halSampleRate, const ExtendedTimestamp &timeStamp);
 
@@ -314,6 +318,7 @@
     sp<AudioTrackServerProxy>  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
+    bool                mPauseHwPending = false; // direct/offload track request for thread pause
     audio_output_flags_t mFlags;
     // If the last track change was notified to the client with readAndClearHasChanged
     std::atomic_flag     mChangeNotified = ATOMIC_FLAG_INIT;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7f91a54..4b8d919 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1439,6 +1439,16 @@
             effect->setMode(mAudioFlinger->getMode());
             effect->setAudioSource(mAudioSource);
         }
+        if (effect->isHapticGenerator()) {
+            // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
+            // for the HapticGenerator.
+            const media::AudioVibratorInfo* defaultVibratorInfo =
+                    mAudioFlinger->getDefaultVibratorInfo_l();
+            if (defaultVibratorInfo != nullptr) {
+                // Only set the vibrator info when it is a valid one.
+                effect->setVibratorInfo(defaultVibratorInfo);
+            }
+        }
         // create effect handle and connect it to effect module
         handle = new EffectHandle(effect, client, effectClient, priority);
         lStatus = handle->initCheck();
@@ -2757,8 +2767,9 @@
 void AudioFlinger::PlaybackThread::readOutputParameters_l()
 {
     // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
-    mSampleRate = mOutput->getSampleRate();
-    mChannelMask = mOutput->getChannelMask();
+    const audio_config_base_t audioConfig = mOutput->getAudioProperties();
+    mSampleRate = audioConfig.sample_rate;
+    mChannelMask = audioConfig.channel_mask;
     if (!audio_is_output_channel(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
@@ -2771,11 +2782,11 @@
     mBalance.setChannelMask(mChannelMask);
 
     // Get actual HAL format.
-    status_t result = mOutput->stream->getFormat(&mHALFormat);
+    status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
     // Get format from the shim, which will be different than the HAL format
     // if playing compressed audio over HDMI passthrough.
-    mFormat = mOutput->getFormat();
+    mFormat = audioConfig.format;
     if (!audio_is_valid_format(mFormat)) {
         LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
     }
@@ -5869,8 +5880,15 @@
         sp<Track> l = mActiveTracks.getLatest();
         bool last = l.get() == track;
 
-        if (track->isPausing()) {
-            track->setPaused();
+        if (track->isPausePending()) {
+            track->pauseAck();
+            // It is possible a track might have been flushed or stopped.
+            // Other operations such as flush pending might occur on the next prepare.
+            if (track->isPausing()) {
+                track->setPaused();
+            }
+            // Always perform pause, as an immediate flush will change
+            // the pause state to be no longer isPausing().
             if (mHwSupportsPause && last && !mHwPaused) {
                 doHwPause = true;
                 mHwPaused = true;
@@ -6412,8 +6430,15 @@
             continue;
         }
 
-        if (track->isPausing()) {
-            track->setPaused();
+        if (track->isPausePending()) {
+            track->pauseAck();
+            // It is possible a track might have been flushed or stopped.
+            // Other operations such as flush pending might occur on the next prepare.
+            if (track->isPausing()) {
+                track->setPaused();
+            }
+            // Always perform pause if last, as an immediate flush will change
+            // the pause state to be no longer isPausing().
             if (last) {
                 if (mHwSupportsPause && !mHwPaused) {
                     doHwPause = true;
@@ -8424,13 +8449,11 @@
         }
         if (reconfig) {
             if (status == BAD_VALUE) {
-                uint32_t sRate;
-                audio_channel_mask_t channelMask;
-                audio_format_t format;
-                if (mInput->stream->getAudioProperties(&sRate, &channelMask, &format) == OK &&
-                        audio_is_linear_pcm(format) && audio_is_linear_pcm(reqFormat) &&
-                        sRate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
-                        audio_channel_count_from_in_mask(channelMask) <= FCC_8) {
+                audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+                if (mInput->stream->getAudioProperties(&config) == OK &&
+                        audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
+                        config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
+                        audio_channel_count_from_in_mask(config.channel_mask) <= FCC_8) {
                     status = NO_ERROR;
                 }
             }
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index db7528d..21651af 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1219,6 +1219,9 @@
             mState = PAUSING;
             ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
                     __func__, mId, (int)mThreadIoHandle);
+            if (isOffloadedOrDirect()) {
+                mPauseHwPending = true;
+            }
             playbackThread->broadcast_l();
             break;
 
@@ -1306,6 +1309,11 @@
     mFlushHwPending = false;
 }
 
+void AudioFlinger::PlaybackThread::Track::pauseAck()
+{
+    mPauseHwPending = false;
+}
+
 void AudioFlinger::PlaybackThread::Track::reset()
 {
     // Do not reset twice to avoid discarding data written just after a flush and before
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index c6bdb04..c2a20c6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -77,6 +77,7 @@
 
     sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
                                                        const DeviceVector &availableDeviceTypes,
+                                                       uid_t uid,
                                                        sp<AudioPolicyMix> *policyMix) const;
 
     /**
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 05ec69e..20b4044 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -226,7 +226,9 @@
             add(devices);
             return size();
         }
-        return SortedVector::merge(devices);
+        ssize_t ret = SortedVector::merge(devices);
+        refreshTypes();
+        return ret;
     }
 
     /**
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index c024a85..b209a88 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -391,6 +391,7 @@
 sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
         audio_source_t inputSource,
         const DeviceVector &availDevices,
+        uid_t uid,
         sp<AudioPolicyMix> *policyMix) const
 {
     for (size_t i = 0; i < size(); i++) {
@@ -402,7 +403,11 @@
             if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
                     mix->mCriteria[j].mValue.mSource == inputSource) ||
                (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mValue.mSource != inputSource)) {
+                    mix->mCriteria[j].mValue.mSource != inputSource) ||
+               (RULE_MATCH_UID == mix->mCriteria[j].mRule &&
+                    mix->mCriteria[j].mValue.mUid == uid) ||
+               (RULE_EXCLUDE_UID == mix->mCriteria[j].mRule &&
+                    mix->mCriteria[j].mValue.mUid != uid)) {
                 // assuming PolicyMix only for remote submix for input
                 // so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
                 audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
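Aside: a minimal sketch (not part of the change itself) of the criterion test the hunk above extends with uid rules. Rule, Criterion and mixMatchesSketch are simplified stand-ins for the AudioMix criterion types; the point is the match/exclude OR-chain over capture preset and uid.

#include <cstdint>
#include <vector>

enum class Rule { MatchSource, ExcludeSource, MatchUid, ExcludeUid };  // stand-ins

struct Criterion {
    Rule rule;
    int source = 0;           // stand-in for audio_source_t
    uint32_t uid = 0;         // stand-in for uid_t
};

// Returns true if any criterion selects this (source, uid) pair, mirroring the
// condition chain in getDeviceAndMixForInputSource().
bool mixMatchesSketch(const std::vector<Criterion>& criteria, int source, uint32_t uid) {
    for (const auto& c : criteria) {
        if ((c.rule == Rule::MatchSource && c.source == source) ||
            (c.rule == Rule::ExcludeSource && c.source != source) ||
            (c.rule == Rule::MatchUid && c.uid == uid) ||
            (c.rule == Rule::ExcludeUid && c.uid != uid)) {
            return true;
        }
    }
    return false;
}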
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index 9bef97c..0f8b0a5 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -170,11 +170,13 @@
     status_t getMediaDevicesForRole(device_role_t role, const DeviceVector& availableDevices,
             DeviceVector& devices) const;
 
+    void dumpCapturePresetDevicesRoleMap(String8 *dst, int spaces) const;
+
     AudioPolicyManagerObserver *mApmObserver = nullptr;
 
     ProductStrategyMap mProductStrategies;
-    ProductStrategyPreferredRoutingMap mProductStrategyPreferredDevices;
-    CapturePresetDevicesRoleMap mCapturePresetDevicesRole;
+    ProductStrategyDevicesRoleMap mProductStrategyDeviceRoleMap;
+    CapturePresetDevicesRoleMap mCapturePresetDevicesRoleMap;
     VolumeGroupMap mVolumeGroups;
     LastRemovableMediaDevices mLastRemovableMediaDevices;
     audio_mode_t mPhoneState = AUDIO_MODE_NORMAL;  /**< current phone state. */
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 54625ea..2aa2f9a 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -18,20 +18,20 @@
 
 #include "VolumeGroup.h"
 
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <HandleGenerator.h>
-#include <string>
-#include <vector>
 #include <map>
-#include <utils/Errors.h>
-#include <utils/String8.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <HandleGenerator.h>
 #include <media/AudioAttributes.h>
 #include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioPolicy.h>
-
-#include <vector>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
 
 namespace android {
 
@@ -170,11 +170,12 @@
     product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
 };
 
-class ProductStrategyPreferredRoutingMap : public std::map<product_strategy_t,
-                                                           AudioDeviceTypeAddrVector>
-{
-public:
-    void dump(String8 *dst, int spaces = 0) const;
-};
+using ProductStrategyDevicesRoleMap =
+        std::map<std::pair<product_strategy_t, device_role_t>, AudioDeviceTypeAddrVector>;
+
+void dumpProductStrategyDevicesRoleMap(
+        const ProductStrategyDevicesRoleMap& productStrategyDeviceRoleMap,
+        String8 *dst,
+        int spaces);
 
 } // namespace android
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index e275306..150a9a8 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -17,6 +17,10 @@
 #define LOG_TAG "APM::AudioPolicyEngine/Base"
 //#define LOG_NDEBUG 0
 
+#include <functional>
+#include <string>
+#include <sys/stat.h>
+
 #include "EngineBase.h"
 #include "EngineDefaultConfig.h"
 #include <TypeConverter.h>
@@ -148,8 +152,13 @@
         });
         return iter != end(volumeGroups);
     };
+    auto fileExists = [](const char* path) {
+        struct stat fileStat;
+        return stat(path, &fileStat) == 0 && S_ISREG(fileStat.st_mode);
+    };
 
-    auto result = engineConfig::parse();
+    auto result = fileExists(engineConfig::DEFAULT_PATH) ?
+            engineConfig::parse(engineConfig::DEFAULT_PATH) : engineConfig::ParsingResult{};
     if (result.parsedConfig == nullptr) {
         ALOGD("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
         engineConfig::Config config = gDefaultEngineConfig;
@@ -342,23 +351,33 @@
     return NO_ERROR;
 }
 
-status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
-            const AudioDeviceTypeAddrVector &devices)
-{
-    // verify strategy exists
-    if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
-        ALOGE("%s invalid strategy %u", __func__, strategy);
+namespace {
+template <typename T>
+status_t setDevicesRoleForT(
+        std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+        T t, device_role_t role, const AudioDeviceTypeAddrVector &devices,
+        const std::string& logStr, std::function<bool(T)> p) {
+    if (!p(t)) {
+        ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
         return BAD_VALUE;
     }
 
     switch (role) {
     case DEVICE_ROLE_PREFERRED:
-        mProductStrategyPreferredDevices[strategy] = devices;
-        break;
-    case DEVICE_ROLE_DISABLED:
-        // TODO (b/184065221): support set devices role as disabled for strategy.
-        ALOGI("%s no implemented for role as %d", __func__, role);
-        break;
+    case DEVICE_ROLE_DISABLED: {
+        tDevicesRoleMap[std::make_pair(t, role)] = devices;
+        // The preferred devices and disabled devices are mutually exclusive. Once a device is
+        // added to one list, it must be removed from the other one.
+        const device_role_t roleToRemove = role == DEVICE_ROLE_PREFERRED ? DEVICE_ROLE_DISABLED
+                                                                         : DEVICE_ROLE_PREFERRED;
+        auto it = tDevicesRoleMap.find(std::make_pair(t, roleToRemove));
+        if (it != tDevicesRoleMap.end()) {
+            it->second = excludeDeviceTypeAddrsFrom(it->second, devices);
+            if (it->second.empty()) {
+                tDevicesRoleMap.erase(it);
+            }
+        }
+    } break;
     case DEVICE_ROLE_NONE:
         // Intentionally fall-through as it is no need to set device role as none for a strategy.
     default:
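Aside: a minimal sketch (not part of the change itself) of the bookkeeping the templated helper above performs: assigning devices to one role evicts them from the opposite role so that PREFERRED and DISABLED stay mutually exclusive. The plain std:: types and setDevicesForRoleSketch are stand-ins for the strategy/role/device types, not code from this patch.

#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include <vector>

enum Role { PREFERRED = 0, DISABLED = 1 };
using RoleMap = std::map<std::pair<int /*strategy*/, Role>, std::vector<std::string /*device*/>>;

void setDevicesForRoleSketch(RoleMap& map, int strategy, Role role,
                             const std::vector<std::string>& devices) {
    map[{strategy, role}] = devices;
    const Role other = (role == PREFERRED) ? DISABLED : PREFERRED;
    auto it = map.find({strategy, other});
    if (it == map.end()) return;
    // Drop any of `devices` from the opposite role, erasing the entry if it empties.
    auto& otherDevices = it->second;
    otherDevices.erase(std::remove_if(otherDevices.begin(), otherDevices.end(),
                                      [&](const std::string& d) {
                                          return std::find(devices.begin(), devices.end(), d) !=
                                                  devices.end();
                                      }),
                       otherDevices.end());
    if (otherDevices.empty()) map.erase(it);
}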
@@ -368,28 +387,26 @@
     return NO_ERROR;
 }
 
-status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
-{
-    // verify strategy exists
-    if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
-        ALOGE("%s invalid strategy %u", __func__, strategy);
+template <typename T>
+status_t removeAllDevicesRoleForT(
+        std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+        T t, device_role_t role, const std::string& logStr, std::function<bool(T)> p) {
+    if (!p(t)) {
+        ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
         return BAD_VALUE;
     }
 
     switch (role) {
     case DEVICE_ROLE_PREFERRED:
-        if (mProductStrategyPreferredDevices.erase(strategy) == 0) {
-            // no preferred device was set
+    case DEVICE_ROLE_DISABLED:
+        if (tDevicesRoleMap.erase(std::make_pair(t, role)) == 0) {
+            // no preferred/disabled device was set
             return NAME_NOT_FOUND;
         }
         break;
-    case DEVICE_ROLE_DISABLED:
-        // TODO (b/184065221): support remove devices role as disabled for strategy.
-        ALOGI("%s no implemented for role as %d", __func__, role);
-        break;
     case DEVICE_ROLE_NONE:
         // Intentionally fall-through as it makes no sense to remove devices with
-        // role as DEVICE_ROLE_NONE for a strategy
+        // role as DEVICE_ROLE_NONE
     default:
         ALOGE("%s invalid role %d", __func__, role);
         return BAD_VALUE;
@@ -397,30 +414,27 @@
     return NO_ERROR;
 }
 
-status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
-            AudioDeviceTypeAddrVector &devices) const
-{
-    // verify strategy exists
-    if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
-        ALOGE("%s unknown strategy %u", __func__, strategy);
+template <typename T>
+status_t getDevicesRoleForT(
+        const std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+        T t, device_role_t role, AudioDeviceTypeAddrVector &devices, const std::string& logStr,
+        std::function<bool(T)> p) {
+    if (!p(t)) {
+        ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
         return BAD_VALUE;
     }
 
     switch (role) {
-    case DEVICE_ROLE_PREFERRED: {
-        // preferred device for this strategy?
-        auto devIt = mProductStrategyPreferredDevices.find(strategy);
-        if (devIt == mProductStrategyPreferredDevices.end()) {
-            ALOGV("%s no preferred device for strategy %u", __func__, strategy);
+    case DEVICE_ROLE_PREFERRED:
+    case DEVICE_ROLE_DISABLED: {
+        auto it = tDevicesRoleMap.find(std::make_pair(t, role));
+        if (it == tDevicesRoleMap.end()) {
+            ALOGV("%s no device as role %u for %s %u", __func__, role, logStr.c_str(), t);
             return NAME_NOT_FOUND;
         }
 
-        devices = devIt->second;
+        devices = it->second;
     } break;
-    case DEVICE_ROLE_DISABLED:
-        // TODO (b/184065221): support devices role as disabled for strategy.
-        ALOGV("%s no implemented for role as %d", __func__, role);
-        break;
     case DEVICE_ROLE_NONE:
         // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
     default:
@@ -430,32 +444,45 @@
     return NO_ERROR;
 }
 
+} // namespace
+
+status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
+            const AudioDeviceTypeAddrVector &devices)
+{
+    std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+        return mProductStrategies.find(strategy) != mProductStrategies.end();
+    };
+    return setDevicesRoleForT(
+            mProductStrategyDeviceRoleMap, strategy, role, devices, "strategy" /*logStr*/, p);
+}
+
+status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
+{
+    std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+        return mProductStrategies.find(strategy) != mProductStrategies.end();
+    };
+    return removeAllDevicesRoleForT(
+            mProductStrategyDeviceRoleMap, strategy, role, "strategy" /*logStr*/, p);
+}
+
+status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
+            AudioDeviceTypeAddrVector &devices) const
+{
+    std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+        return mProductStrategies.find(strategy) != mProductStrategies.end();
+    };
+    return getDevicesRoleForT(
+            mProductStrategyDeviceRoleMap, strategy, role, devices, "strategy" /*logStr*/, p);
+}
+
 status_t EngineBase::setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
         const AudioDeviceTypeAddrVector &devices)
 {
-    // verify if the audio source is valid
-    if (!audio_is_valid_audio_source(audioSource)) {
-        ALOGE("%s unknown audio source %u", __func__, audioSource);
-    }
-
-    switch (role) {
-    case DEVICE_ROLE_PREFERRED:
-        mCapturePresetDevicesRole[audioSource][role] = devices;
-        // When the devices are set as preferred devices, remove them from the disabled devices.
-        doRemoveDevicesRoleForCapturePreset(
-                audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
-        break;
-    case DEVICE_ROLE_DISABLED:
-        // TODO: support setting devices role as disabled for capture preset.
-        ALOGI("%s no implemented for role as %d", __func__, role);
-        break;
-    case DEVICE_ROLE_NONE:
-        // Intentionally fall-through as it is no need to set device role as none
-    default:
-        ALOGE("%s invalid role %d", __func__, role);
-        return BAD_VALUE;
-    }
-    return NO_ERROR;
+    std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+        return audio_is_valid_audio_source(audioSource);
+    };
+    return setDevicesRoleForT(
+            mCapturePresetDevicesRoleMap, audioSource, role, devices, "audio source" /*logStr*/, p);
 }
 
 status_t EngineBase::addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
@@ -468,19 +495,20 @@
 
     switch (role) {
     case DEVICE_ROLE_PREFERRED:
-        mCapturePresetDevicesRole[audioSource][role] = excludeDeviceTypeAddrsFrom(
-                mCapturePresetDevicesRole[audioSource][role], devices);
-        for (const auto& device : devices) {
-            mCapturePresetDevicesRole[audioSource][role].push_back(device);
+    case DEVICE_ROLE_DISABLED: {
+        const auto audioSourceRole = std::make_pair(audioSource, role);
+        mCapturePresetDevicesRoleMap[audioSourceRole] = excludeDeviceTypeAddrsFrom(
+                mCapturePresetDevicesRoleMap[audioSourceRole], devices);
+        for (const auto &device : devices) {
+            mCapturePresetDevicesRoleMap[audioSourceRole].push_back(device);
         }
         // When the devices are set as preferred devices, remove them from the disabled devices.
         doRemoveDevicesRoleForCapturePreset(
-                audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
-        break;
-    case DEVICE_ROLE_DISABLED:
-        // TODO: support setting devices role as disabled for capture preset.
-        ALOGI("%s no implemented for role as %d", __func__, role);
-        break;
+                audioSource,
+                role == DEVICE_ROLE_PREFERRED ? DEVICE_ROLE_DISABLED : DEVICE_ROLE_PREFERRED,
+                devices,
+                false /*forceMatched*/);
+    } break;
     case DEVICE_ROLE_NONE:
         // Intentionally fall-through as it is no need to set device role as none
     default:
@@ -506,21 +534,22 @@
     switch (role) {
     case DEVICE_ROLE_PREFERRED:
     case DEVICE_ROLE_DISABLED: {
-        if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
-                mCapturePresetDevicesRole[audioSource].count(role) == 0) {
+        const auto audioSourceRole = std::make_pair(audioSource, role);
+        if (mCapturePresetDevicesRoleMap.find(audioSourceRole) ==
+                mCapturePresetDevicesRoleMap.end()) {
             return NAME_NOT_FOUND;
         }
         AudioDeviceTypeAddrVector remainingDevices = excludeDeviceTypeAddrsFrom(
-                mCapturePresetDevicesRole[audioSource][role], devices);
+                mCapturePresetDevicesRoleMap[audioSourceRole], devices);
         if (forceMatched && remainingDevices.size() !=
-                mCapturePresetDevicesRole[audioSource][role].size() - devices.size()) {
+                mCapturePresetDevicesRoleMap[audioSourceRole].size() - devices.size()) {
             // There are some devices from `devicesToRemove` that are not shown in the cached record
             return BAD_VALUE;
         }
-        mCapturePresetDevicesRole[audioSource][role] = remainingDevices;
-        if (mCapturePresetDevicesRole[audioSource][role].empty()) {
+        mCapturePresetDevicesRoleMap[audioSourceRole] = remainingDevices;
+        if (mCapturePresetDevicesRoleMap[audioSourceRole].empty()) {
             // Remove the role when device list is empty
-            mCapturePresetDevicesRole[audioSource].erase(role);
+            mCapturePresetDevicesRoleMap.erase(audioSourceRole);
         }
     } break;
     case DEVICE_ROLE_NONE:
@@ -536,63 +565,21 @@
 status_t EngineBase::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
                                                       device_role_t role)
 {
-    // verify if the audio source is valid
-    if (!audio_is_valid_audio_source(audioSource)) {
-        ALOGE("%s unknown audio source %u", __func__, audioSource);
-    }
-
-    switch (role) {
-    case DEVICE_ROLE_PREFERRED:
-        if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
-                mCapturePresetDevicesRole[audioSource].erase(role) == 0) {
-            // no preferred device for the given audio source
-            return NAME_NOT_FOUND;
-        }
-        break;
-    case DEVICE_ROLE_DISABLED:
-        // TODO: support remove devices role as disabled for strategy.
-        ALOGI("%s no implemented for role as %d", __func__, role);
-        break;
-    case DEVICE_ROLE_NONE:
-        // Intentionally fall-through as it makes no sense to remove devices with
-        // role as DEVICE_ROLE_NONE for a strategy
-    default:
-        ALOGE("%s invalid role %d", __func__, role);
-        return BAD_VALUE;
-    }
-    return NO_ERROR;
+    std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+        return audio_is_valid_audio_source(audioSource);
+    };
+    return removeAllDevicesRoleForT(
+            mCapturePresetDevicesRoleMap, audioSource, role, "audio source" /*logStr*/, p);
 }
 
 status_t EngineBase::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
         device_role_t role, AudioDeviceTypeAddrVector &devices) const
 {
-    // verify if the audio source is valid
-    if (!audio_is_valid_audio_source(audioSource)) {
-        ALOGE("%s unknown audio source %u", __func__, audioSource);
-        return BAD_VALUE;
-    }
-
-    switch (role) {
-    case DEVICE_ROLE_PREFERRED:
-    case DEVICE_ROLE_DISABLED: {
-        if (mCapturePresetDevicesRole.count(audioSource) == 0) {
-            return NAME_NOT_FOUND;
-        }
-        auto devIt = mCapturePresetDevicesRole.at(audioSource).find(role);
-        if (devIt == mCapturePresetDevicesRole.at(audioSource).end()) {
-            ALOGV("%s no devices role(%d) for capture preset %u", __func__, role, audioSource);
-            return NAME_NOT_FOUND;
-        }
-
-        devices = devIt->second;
-    } break;
-    case DEVICE_ROLE_NONE:
-        // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
-    default:
-        ALOGE("%s invalid role %d", __func__, role);
-        return BAD_VALUE;
-    }
-    return NO_ERROR;
+    std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+        return audio_is_valid_audio_source(audioSource);
+    };
+    return getDevicesRoleForT(
+            mCapturePresetDevicesRoleMap, audioSource, role, devices, "audio source" /*logStr*/, p);
 }
 
 status_t EngineBase::getMediaDevicesForRole(device_role_t role,
@@ -634,10 +621,22 @@
     return activeDevices;
 }
 
+void EngineBase::dumpCapturePresetDevicesRoleMap(String8 *dst, int spaces) const
+{
+    dst->appendFormat("\n%*sDevice role per capture preset dump:", spaces, "");
+    for (const auto& [capturePresetRolePair, devices] : mCapturePresetDevicesRoleMap) {
+        dst->appendFormat("\n%*sCapture preset(%u) Device Role(%u) Devices(%s)", spaces + 2, "",
+                capturePresetRolePair.first, capturePresetRolePair.second,
+                dumpAudioDeviceTypeAddrVector(devices, true /*includeSensitiveInfo*/).c_str());
+    }
+    dst->appendFormat("\n");
+}
+
 void EngineBase::dump(String8 *dst) const
 {
     mProductStrategies.dump(dst, 2);
-    mProductStrategyPreferredDevices.dump(dst, 2);
+    dumpProductStrategyDevicesRoleMap(mProductStrategyDeviceRoleMap, dst, 2);
+    dumpCapturePresetDevicesRoleMap(dst, 2);
     mVolumeGroups.dump(dst, 2);
 }
 
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index d4cea5a..b3d144f 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -320,14 +320,15 @@
     }
 }
 
-void ProductStrategyPreferredRoutingMap::dump(android::String8* dst, int spaces) const {
-    dst->appendFormat("\n%*sPreferred devices per product strategy dump:", spaces, "");
-    for (const auto& iter : *this) {
-        dst->appendFormat("\n%*sStrategy %u %s",
-                          spaces + 2, "",
-                          (uint32_t) iter.first,
-                          dumpAudioDeviceTypeAddrVector(iter.second, true /*includeSensitiveInfo*/)
-                                  .c_str());
+void dumpProductStrategyDevicesRoleMap(
+        const ProductStrategyDevicesRoleMap& productStrategyDeviceRoleMap,
+        String8 *dst,
+        int spaces) {
+    dst->appendFormat("\n%*sDevice role per product strategy dump:", spaces, "");
+    for (const auto& [strategyRolePair, devices] : productStrategyDeviceRoleMap) {
+        dst->appendFormat("\n%*sStrategy(%u) Device Role(%u) Devices(%s)", spaces + 2, "",
+                strategyRolePair.first, strategyRolePair.second,
+                dumpAudioDeviceTypeAddrVector(devices, true /*includeSensitiveInfo*/).c_str());
     }
     dst->appendFormat("\n");
 }
diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
index f0a01d3..518f86e 100644
--- a/services/audiopolicy/engine/interface/EngineInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include <utility>
+
 #include <AudioPolicyManagerObserver.h>
 #include <media/AudioProductStrategy.h>
 #include <media/AudioVolumeGroup.h>
@@ -35,7 +37,7 @@
 using StrategyVector = std::vector<product_strategy_t>;
 using VolumeGroupVector = std::vector<volume_group_t>;
 using CapturePresetDevicesRoleMap =
-        std::map<audio_source_t, std::map<device_role_t, AudioDeviceTypeAddrVector>>;
+        std::map<std::pair<audio_source_t, device_role_t>, AudioDeviceTypeAddrVector>;
 
 /**
  * This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -171,8 +173,10 @@
      * @param[out] mix to be used if a mix has been installed for the given audio attributes.
      * @return selected input device for the audio attributes, may be null if error.
      */
-    virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
-            const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const = 0;
+    virtual sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                             uid_t uid = 0,
+                                                             sp<AudioPolicyMix> *mix = nullptr)
+                                                             const = 0;
 
     /**
      * Get the legacy stream type for a given audio attributes.
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 6d42fcf..b0c376a 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -310,6 +310,7 @@
 }
 
 sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                         uid_t uid,
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -328,7 +329,10 @@
         return device;
     }
 
-    device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+    device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+                                                       availableInputDevices,
+                                                       uid,
+                                                       mix);
     if (device != nullptr) {
         return device;
     }
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 3b371d8..d8e2742 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -61,8 +61,10 @@
     DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
                                            bool fromCache = false) const override;
 
-    sp<DeviceDescriptor> getInputDeviceForAttributes(
-            const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
+    sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                     uid_t uid = 0,
+                                                     sp<AudioPolicyMix> *mix = nullptr)
+                                                     const override;
 
     void updateDeviceSelectionCache() override;
 
diff --git a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
index 5083b14..43b3dd2 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
@@ -172,12 +172,6 @@
         logging.info("added stub input device mask")
 
     # Transform input source in inclusive criterion
-    shift = len(all_component_types['OutputDevicesMask'])
-    if shift > 32:
-        logging.critical("OutputDevicesMask incompatible with criterion representation on 32 bits")
-        logging.info("EXIT ON FAILURE")
-        exit(1)
-
     for component_types in all_component_types:
         values = ','.join('{}:{}'.format(value, key) for key, value in all_component_types[component_types].items())
         logging.info("{}: <{}>".format(component_types, values))
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 1a903a6..27f89e3 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -707,6 +707,7 @@
 }
 
 sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                         uid_t uid,
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -726,7 +727,10 @@
         return device;
     }
 
-    device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+    device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+                                                       availableInputDevices,
+                                                       uid,
+                                                       mix);
     if (device != nullptr) {
         return device;
     }
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 98f59d3..595e289 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -62,8 +62,10 @@
     DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
                                            bool fromCache = false) const override;
 
-    sp<DeviceDescriptor> getInputDeviceForAttributes(
-            const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
+    sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                     uid_t uid = 0,
+                                                     sp<AudioPolicyMix> *mix = nullptr)
+                                                     const override;
 
     void updateDeviceSelectionCache() override;
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 2b9f8d7..dd44c54 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -284,7 +284,7 @@
                 setOutputDevices(desc, newDevices, force, 0);
             }
             if (!desc->isDuplicated() && desc->mProfile->hasDynamicAudioProfile() &&
-                    desc->devices() != activeMediaDevices &&
+                    !activeMediaDevices.empty() && desc->devices() != activeMediaDevices &&
                     desc->supportsDevicesForPlayback(activeMediaDevices)) {
                 // Reopen the output to query the dynamic profiles when there is not active
                 // clients or all active clients will be rerouted. Otherwise, set the flag
@@ -2228,7 +2228,8 @@
         } else {
             // Prevent from storing invalid requested device id in clients
             requestedDeviceId = AUDIO_PORT_HANDLE_NONE;
-            device = mEngine->getInputDeviceForAttributes(attributes, &policyMix);
+            device = mEngine->getInputDeviceForAttributes(attributes, uid, &policyMix);
+            ALOGV("%s found device type is 0x%X", __FUNCTION__, device->type());
         }
         if (device == nullptr) {
             ALOGW("getInputForAttr() could not find device for source %d", attributes.source);
@@ -2614,7 +2615,7 @@
             bool close = false;
             for (const auto& client : input->clientsList()) {
                 sp<DeviceDescriptor> device =
-                    mEngine->getInputDeviceForAttributes(client->attributes());
+                    mEngine->getInputDeviceForAttributes(client->attributes(), client->uid());
                 if (!input->supportedDevices().contains(device)) {
                     close = true;
                     break;
@@ -5858,12 +5859,22 @@
 
     // If we are not in call and no client is active on this input, this methods returns
     // a null sp<>, causing the patch on the input stream to be released.
-    audio_attributes_t attributes = inputDesc->getHighestPriorityAttributes();
+    audio_attributes_t attributes;
+    uid_t uid;
+    sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
+    if (topClient != nullptr) {
+        attributes = topClient->attributes();
+        uid = topClient->uid();
+    } else {
+        attributes = { .source = AUDIO_SOURCE_DEFAULT };
+        uid = 0;
+    }
+
     if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
         attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     }
     if (attributes.source != AUDIO_SOURCE_DEFAULT) {
-        device = mEngine->getInputDeviceForAttributes(attributes);
+        device = mEngine->getInputDeviceForAttributes(attributes, uid);
     }
 
     return device;
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index d5ba756..14be671 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -23,6 +23,7 @@
     ],
 
     shared_libs: [
+        "libactivitymanager_aidl",
         "libaudioclient",
         "libaudioclient_aidl_conversion",
         "libaudiofoundation",
@@ -67,6 +68,7 @@
     ],
 
     export_shared_lib_headers: [
+        "libactivitymanager_aidl",
         "libsensorprivacy",
         "media_permission-aidl-cpp",
     ],
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index a0b35a8..32c0267 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -113,6 +113,7 @@
         "libutilscallstack",
         "libutils",
         "libbinder",
+        "libactivitymanager_aidl",
         "libcutils",
         "libmedia",
         "libmediautils",
@@ -154,12 +155,14 @@
     ],
 
     static_libs: [
+        "libprocessinfoservice_aidl",
         "libbinderthreadstateutils",
         "media_permission-aidl-cpp",
     ],
 
     export_shared_lib_headers: [
         "libbinder",
+        "libactivitymanager_aidl",
         "libcamera_client",
         "libfmq",
         "libsensorprivacy",
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 6cd20a1..eb24a93 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -41,7 +41,6 @@
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <binder/PermissionController.h>
-#include <binder/ProcessInfoService.h>
 #include <binder/IResultReceiver.h>
 #include <binderthreadstate/CallerUtils.h>
 #include <cutils/atomic.h>
@@ -57,6 +56,7 @@
 #include <media/IMediaHTTPService.h>
 #include <media/mediaplayer.h>
 #include <mediautils/BatteryNotifier.h>
+#include <processinfo/ProcessInfoService.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/String16.h>
@@ -224,10 +224,16 @@
     return OK;
 }
 
-void CameraService::broadcastTorchModeStatus(const String8& cameraId, TorchModeStatus status) {
+void CameraService::broadcastTorchModeStatus(const String8& cameraId, TorchModeStatus status,
+        SystemCameraKind systemCameraKind) {
     Mutex::Autolock lock(mStatusListenerLock);
-
     for (auto& i : mListenerList) {
+        if (shouldSkipStatusUpdates(systemCameraKind, i->isVendorListener(), i->getListenerPid(),
+                i->getListenerUid())) {
+            ALOGV("Skipping torch callback for system-only camera device %s",
+                    cameraId.c_str());
+            continue;
+        }
         i->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
     }
 }
@@ -317,7 +323,7 @@
         Mutex::Autolock al(mTorchStatusMutex);
         mTorchStatusMap.add(id, TorchModeStatus::AVAILABLE_OFF);
 
-        broadcastTorchModeStatus(id, TorchModeStatus::AVAILABLE_OFF);
+        broadcastTorchModeStatus(id, TorchModeStatus::AVAILABLE_OFF, deviceKind);
     }
 
     updateCameraNumAndIds();
@@ -478,12 +484,19 @@
 
 void CameraService::onTorchStatusChanged(const String8& cameraId,
         TorchModeStatus newStatus) {
+    SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
+    status_t res = getSystemCameraKind(cameraId, &systemCameraKind);
+    if (res != OK) {
+        ALOGE("%s: Could not get system camera kind for camera id %s", __FUNCTION__,
+                cameraId.string());
+        return;
+    }
     Mutex::Autolock al(mTorchStatusMutex);
-    onTorchStatusChangedLocked(cameraId, newStatus);
+    onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
 }
 
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
-        TorchModeStatus newStatus) {
+        TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
             __FUNCTION__, cameraId.string(), newStatus);
 
@@ -532,8 +545,7 @@
             }
         }
     }
-
-    broadcastTorchModeStatus(cameraId, newStatus);
+    broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
 }
 
 static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
@@ -1812,6 +1824,10 @@
     String8 id = String8(cameraId.string());
     int uid = CameraThreadState::getCallingUid();
 
+    if (shouldRejectSystemCameraConnection(id)) {
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to set torch mode"
+                " for system only device %s: ", id.string());
+    }
     // verify id is valid.
     auto state = getCameraState(id);
     if (state == nullptr) {
@@ -2168,6 +2184,11 @@
                     return shouldSkipStatusUpdates(deviceKind, isVendorListener, clientPid,
                             clientUid);}), cameraStatuses->end());
 
+    // cameraStatuses will have non-eligible camera ids removed.
+    std::set<String16> idsChosenForCallback;
+    for (const auto &s : *cameraStatuses) {
+        idsChosenForCallback.insert(String16(s.cameraId));
+    }
 
     /*
      * Immediately signal current torch status to this listener only
@@ -2177,7 +2198,11 @@
         Mutex::Autolock al(mTorchStatusMutex);
         for (size_t i = 0; i < mTorchStatusMap.size(); i++ ) {
             String16 id = String16(mTorchStatusMap.keyAt(i).string());
-            listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
+            // The camera id is visible to the client. Fine to send torch
+            // callback.
+            if (idsChosenForCallback.find(id) != idsChosenForCallback.end()) {
+                listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
+            }
         }
     }
 
@@ -3747,7 +3772,7 @@
                             TorchModeStatus::AVAILABLE_OFF :
                             TorchModeStatus::NOT_AVAILABLE;
                     if (torchStatus != newTorchStatus) {
-                        onTorchStatusChangedLocked(cameraId, newTorchStatus);
+                        onTorchStatusChangedLocked(cameraId, newTorchStatus, deviceKind);
                     }
                 }
             }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 092d916..98d4500 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -991,7 +991,8 @@
     // handle torch mode status change and invoke callbacks. mTorchStatusMutex
     // should be locked.
     void onTorchStatusChangedLocked(const String8& cameraId,
-            hardware::camera::common::V1_0::TorchModeStatus newStatus);
+            hardware::camera::common::V1_0::TorchModeStatus newStatus,
+            SystemCameraKind systemCameraKind);
 
     // get a camera's torch status. mTorchStatusMutex should be locked.
     status_t getTorchStatusLocked(const String8 &cameraId,
@@ -1085,7 +1086,8 @@
 
 
     void broadcastTorchModeStatus(const String8& cameraId,
-            hardware::camera::common::V1_0::TorchModeStatus status);
+            hardware::camera::common::V1_0::TorchModeStatus status,
+            SystemCameraKind systemCameraKind);
 
     void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
 
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 62fc18f..8942d05 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -1243,7 +1243,9 @@
     interface = mServiceProxy->tryGetService(newProvider);
 
     if (interface == nullptr) {
-        ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+        // The interface may not be started yet. In that case, this is not a
+        // fatal error.
+        ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
                 newProvider.c_str());
         return BAD_VALUE;
     }
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index bfc722e..7ee731e 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -23,6 +23,7 @@
 
 #include <pwd.h> //getpwuid
 
+#include <android-base/stringprintf.h>
 #include <android/content/pm/IPackageManagerNative.h>  // package info
 #include <audio_utils/clock.h>                 // clock conversions
 #include <binder/IPCThreadState.h>             // get calling uid
@@ -37,6 +38,7 @@
 
 namespace android {
 
+using base::StringPrintf;
 using mediametrics::Item;
 using mediametrics::startsWith;
 
@@ -211,14 +213,12 @@
 
 status_t MediaMetricsService::dump(int fd, const Vector<String16>& args)
 {
-    String8 result;
-
     if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
-        result.appendFormat("Permission Denial: "
+        const std::string result = StringPrintf("Permission Denial: "
                 "can't dump MediaMetricsService from pid=%d, uid=%d\n",
                 IPCThreadState::self()->getCallingPid(),
                 IPCThreadState::self()->getCallingUid());
-        write(fd, result.string(), result.size());
+        write(fd, result.c_str(), result.size());
         return NO_ERROR;
     }
 
@@ -250,17 +250,18 @@
             // dumpsys media.metrics audiotrack,codec
             // or dumpsys media.metrics audiotrack codec
 
-            result.append("Recognized parameters:\n");
-            result.append("--all         show all records\n");
-            result.append("--clear       clear out saved records\n");
-            result.append("--heap        show heap usage (top 100)\n");
-            result.append("--help        display help\n");
-            result.append("--prefix X    process records for component X\n");
-            result.append("--since X     X < 0: records from -X seconds in the past\n");
-            result.append("              X = 0: ignore\n");
-            result.append("              X > 0: records from X seconds since Unix epoch\n");
-            result.append("--unreachable show unreachable memory (leaks)\n");
-            write(fd, result.string(), result.size());
+            static constexpr char result[] =
+                    "Recognized parameters:\n"
+                    "--all         show all records\n"
+                    "--clear       clear out saved records\n"
+                    "--heap        show heap usage (top 100)\n"
+                    "--help        display help\n"
+                    "--prefix X    process records for component X\n"
+                    "--since X     X < 0: records from -X seconds in the past\n"
+                    "              X = 0: ignore\n"
+                    "              X > 0: records from X seconds since Unix epoch\n"
+                    "--unreachable show unreachable memory (leaks)\n";
+            write(fd, result, std::size(result) - 1);  // do not write the trailing NUL
             return NO_ERROR;
         } else if (args[i] == prefixOption) {
             ++i;
@@ -286,7 +287,7 @@
             unreachable = true;
         }
     }
-
+    std::stringstream result;
     {
         std::lock_guard _l(mLock);
 
@@ -295,21 +296,22 @@
             mItems.clear();
             mAudioAnalytics.clear();
         } else {
-            result.appendFormat("Dump of the %s process:\n", kServiceName);
+            result << StringPrintf("Dump of the %s process:\n", kServiceName);
             const char *prefixptr = prefix.size() > 0 ? prefix.c_str() : nullptr;
-            dumpHeaders(result, sinceNs, prefixptr);
-            dumpQueue(result, sinceNs, prefixptr);
+            result << dumpHeaders(sinceNs, prefixptr);
+            result << dumpQueue(sinceNs, prefixptr);
 
             // TODO: maybe consider a better way of dumping audio analytics info.
             const int32_t linesToDump = all ? INT32_MAX : 1000;
             auto [ dumpString, lines ] = mAudioAnalytics.dump(linesToDump, sinceNs, prefixptr);
-            result.append(dumpString.c_str());
+            result << dumpString;
             if (lines == linesToDump) {
-                result.append("-- some lines may be truncated --\n");
+                result << "-- some lines may be truncated --\n";
             }
         }
     }
-    write(fd, result.string(), result.size());
+    const std::string str = result.str();
+    write(fd, str.c_str(), str.size());
 
     // Check heap and unreachable memory outside of lock.
     if (heap) {
@@ -327,38 +329,37 @@
 }
 
 // dump headers
-void MediaMetricsService::dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix)
+std::string MediaMetricsService::dumpHeaders(int64_t sinceNs, const char* prefix)
 {
+    std::stringstream result;
     if (mediametrics::Item::isEnabled()) {
-        result.append("Metrics gathering: enabled\n");
+        result << "Metrics gathering: enabled\n";
     } else {
-        result.append("Metrics gathering: DISABLED via property\n");
+        result << "Metrics gathering: DISABLED via property\n";
     }
-    result.appendFormat(
+    result << StringPrintf(
             "Since Boot: Submissions: %lld Accepted: %lld\n",
             (long long)mItemsSubmitted.load(), (long long)mItemsFinalized);
-    result.appendFormat(
+    result << StringPrintf(
             "Records Discarded: %lld (by Count: %lld by Expiration: %lld)\n",
             (long long)mItemsDiscarded, (long long)mItemsDiscardedCount,
             (long long)mItemsDiscardedExpire);
     if (prefix != nullptr) {
-        result.appendFormat("Restricting to prefix %s", prefix);
+        result << "Restricting to prefix " << prefix << "\n";
     }
     if (sinceNs != 0) {
-        result.appendFormat(
-            "Emitting Queue entries more recent than: %lld\n",
-            (long long)sinceNs);
+        result << "Emitting Queue entries more recent than: " << sinceNs << "\n";
     }
+    return result.str();
 }
 
 // TODO: should prefix be a set<string>?
-void MediaMetricsService::dumpQueue(String8 &result, int64_t sinceNs, const char* prefix)
+std::string MediaMetricsService::dumpQueue(int64_t sinceNs, const char* prefix)
 {
     if (mItems.empty()) {
-        result.append("empty\n");
-        return;
+        return "empty\n";
     }
-
+    std::stringstream result;
     int slot = 0;
     for (const auto &item : mItems) {         // TODO: consider std::lower_bound() on mItems
         if (item->getTimestamp() < sinceNs) { // sinceNs == 0 means all items shown
@@ -369,9 +370,10 @@
                     __func__, item->getKey().c_str(), prefix);
             continue;
         }
-        result.appendFormat("%5d: %s\n", slot, item->toString().c_str());
+        result << StringPrintf("%5d: %s\n", slot, item->toString().c_str());
         slot++;
     }
+    return result.str();
 }
 
 //
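Aside: a minimal sketch (not part of the change itself) of the pattern the dump refactor above moves to: helpers return std::string and the caller streams them together before a single write(). StringPrintfLike is a hypothetical stand-in for android::base::StringPrintf, used only to keep the sketch self-contained.

#include <cstdarg>
#include <cstdio>
#include <sstream>
#include <string>

static std::string StringPrintfLike(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    char buf[256];
    vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    return buf;
}

static std::string dumpHeadersSketch(long long submitted, long long finalized) {
    std::stringstream result;
    result << "Metrics gathering: enabled\n";
    result << StringPrintfLike("Since Boot: Submissions: %lld Accepted: %lld\n",
                               submitted, finalized);
    return result.str();   // caller concatenates the pieces and write()s once
}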
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/MediaMetricsService.h
index 8bc8019..6234656 100644
--- a/services/mediametrics/MediaMetricsService.h
+++ b/services/mediametrics/MediaMetricsService.h
@@ -100,8 +100,8 @@
     bool expirations(const std::shared_ptr<const mediametrics::Item>& item) REQUIRES(mLock);
 
     // support for generating output
-    void dumpQueue(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
-    void dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
+    std::string dumpQueue(int64_t sinceNs, const char* prefix) REQUIRES(mLock);
+    std::string dumpHeaders(int64_t sinceNs, const char* prefix) REQUIRES(mLock);
 
     // support statsd pushed atoms
     static bool isPullable(const std::string &key);
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 926de3e..db61061 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -74,6 +74,9 @@
         "ResourceManagerService.cpp",
         "ResourceObserverService.cpp",
         "ServiceLog.cpp",
+
+        // TODO: convert to AIDL?
+        "IMediaResourceMonitor.cpp",
     ],
 
     shared_libs: [
diff --git a/services/mediaresourcemanager/IMediaResourceMonitor.cpp b/services/mediaresourcemanager/IMediaResourceMonitor.cpp
new file mode 100644
index 0000000..42d7feb
--- /dev/null
+++ b/services/mediaresourcemanager/IMediaResourceMonitor.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IMediaResourceMonitor.h"
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+#include <sys/types.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------
+
+class BpMediaResourceMonitor : public BpInterface<IMediaResourceMonitor> {
+public:
+    explicit BpMediaResourceMonitor(const sp<IBinder>& impl)
+        : BpInterface<IMediaResourceMonitor>(impl) {}
+
+    virtual void notifyResourceGranted(/*in*/ int32_t pid, /*in*/ const int32_t type)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaResourceMonitor::getInterfaceDescriptor());
+        data.writeInt32(pid);
+        data.writeInt32(type);
+        remote()->transact(NOTIFY_RESOURCE_GRANTED, data, &reply, IBinder::FLAG_ONEWAY);
+    }
+};
+
+IMPLEMENT_META_INTERFACE(MediaResourceMonitor, "android.media.IMediaResourceMonitor")
+
+// ----------------------------------------------------------------------
+
+// NOLINTNEXTLINE(google-default-arguments)
+status_t BnMediaResourceMonitor::onTransact( uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags) {
+    switch(code) {
+        case NOTIFY_RESOURCE_GRANTED: {
+            CHECK_INTERFACE(IMediaResourceMonitor, data, reply);
+            int32_t pid = data.readInt32();
+            const int32_t type = data.readInt32();
+            notifyResourceGranted(/*in*/ pid, /*in*/ type);
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------
+
+} // namespace android
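
For context, the Bp proxy above is what a caller goes through once it has a binder to the monitor. A hedged usage sketch; the service name "media_resource_monitor" is an assumption for illustration, and the null checks stand in for whatever error handling the real caller does:

// Sketch only: the service name below is an assumption, not taken from this patch.
#include <binder/IServiceManager.h>
#include <utils/String16.h>

#include "IMediaResourceMonitor.h"

namespace android {

void notifyVideoCodecGranted(int32_t pid) {
    sp<IBinder> binder =
            defaultServiceManager()->checkService(String16("media_resource_monitor"));
    if (binder == nullptr) {
        return;  // monitor not registered; nothing to notify
    }
    sp<IMediaResourceMonitor> monitor = interface_cast<IMediaResourceMonitor>(binder);
    if (monitor != nullptr) {
        // The proxy marks the transaction FLAG_ONEWAY, so this does not block on the remote.
        monitor->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_VIDEO_CODEC);
    }
}

}  // namespace android
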
diff --git a/services/mediaresourcemanager/IMediaResourceMonitor.h b/services/mediaresourcemanager/IMediaResourceMonitor.h
new file mode 100644
index 0000000..f92d557
--- /dev/null
+++ b/services/mediaresourcemanager/IMediaResourceMonitor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#ifndef __ANDROID_VNDK__
+
+#include <binder/IInterface.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------
+
+class IMediaResourceMonitor : public IInterface {
+public:
+    DECLARE_META_INTERFACE(MediaResourceMonitor)
+
+    // Values should be in sync with Intent.EXTRA_MEDIA_RESOURCE_TYPE_XXX.
+    enum {
+        TYPE_VIDEO_CODEC = 0,
+        TYPE_AUDIO_CODEC = 1,
+    };
+
+    virtual void notifyResourceGranted(/*in*/ int32_t pid, /*in*/ const int32_t type) = 0;
+
+    enum {
+        NOTIFY_RESOURCE_GRANTED = IBinder::FIRST_CALL_TRANSACTION,
+    };
+};
+
+// ----------------------------------------------------------------------
+
+class BnMediaResourceMonitor : public BnInterface<IMediaResourceMonitor> {
+public:
+    // NOLINTNEXTLINE(google-default-arguments)
+    virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+            uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------
+
+} // namespace android
+
+#else // __ANDROID_VNDK__
+#error "This header is not visible to vendors"
+#endif // __ANDROID_VNDK__
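
The Bn side is what a service process would implement. A hypothetical minimal implementation (class name, log tag, and the registration shown in the trailing comment are illustrative only):

#define LOG_TAG "MediaResourceMonitorSketch"
#include <log/log.h>

#include "IMediaResourceMonitor.h"

namespace android {

// Hypothetical server-side implementation: derive from BnMediaResourceMonitor and
// override notifyResourceGranted(); onTransact() unpacks the Parcel and lands here.
class MediaResourceMonitorSketch : public BnMediaResourceMonitor {
public:
    void notifyResourceGranted(int32_t pid, const int32_t type) override {
        ALOGI("resource type %d granted to pid %d", type, pid);
    }
};

// A system service would publish it roughly like this (the name is an assumption):
//   defaultServiceManager()->addService(String16("media_resource_monitor"),
//                                       new MediaResourceMonitorSketch());

}  // namespace android
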
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 289cffd..953686b 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -21,7 +21,6 @@
 
 #include <android/binder_manager.h>
 #include <android/binder_process.h>
-#include <binder/IMediaResourceMonitor.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <cutils/sched_policy.h>
@@ -36,6 +35,7 @@
 #include <sys/time.h>
 #include <unistd.h>
 
+#include "IMediaResourceMonitor.h"
 #include "ResourceManagerService.h"
 #include "ResourceObserverService.h"
 #include "ServiceLog.h"
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index 4df5a9f..cb180ec 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -25,6 +25,7 @@
     ],
 
     shared_libs: [
+        "libactivitymanager_aidl",
         "libbinder",
         "libbinder_ndk",
         "liblog",
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
index 0d462d1..20e4bfb 100644
--- a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -449,13 +449,17 @@
     template <bool expectation = success>
     bool getClientUids(int32_t sessionId, std::vector<int32_t>* clientUids) {
         constexpr bool shouldSucceed = (expectation == success);
-        bool result;
-        Status status = mClient->getClientUids(sessionId, clientUids, &result);
+        std::optional<std::vector<int32_t>> aidl_return;
+        Status status = mClient->getClientUids(sessionId, &aidl_return);
 
         EXPECT_TRUE(status.isOk());
-        EXPECT_EQ(result, shouldSucceed);
+        bool gotClientUids = (aidl_return != std::nullopt);
+        if (gotClientUids) {
+            *clientUids = *aidl_return;
+        }
+        EXPECT_EQ(gotClientUids, shouldSucceed);
 
-        return status.isOk() && (result == shouldSucceed);
+        return status.isOk() && (gotClientUids == shouldSucceed);
     }
 
     int32_t mClientId;
@@ -524,8 +528,24 @@
         EXPECT_TRUE(mClient3->unregisterClient().isOk());
     }
 
+    const char* prepareOutputFile(const char* path) {
+        deleteFile(path);
+        return path;
+    }
+
     void deleteFile(const char* path) { unlink(path); }
 
+    void dismissKeyguard() {
+        EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+        EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+    }
+
+    void stopAppPackages() {
+        EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+        EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+        EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+    }
+
     std::shared_ptr<IMediaTranscodingService> mService;
     std::shared_ptr<TestClientCallback> mClient1;
     std::shared_ptr<TestClientCallback> mClient2;
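
getClientUids() now distinguishes "session unknown" (a null AIDL return, surfaced as std::nullopt) from "session known but with an empty uid list". A small sketch of that convention, with a hypothetical stand-in for the generated proxy call:

#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical stand-in for the generated AIDL proxy call.
std::optional<std::vector<int32_t>> getClientUidsFor(int32_t sessionId) {
    if (sessionId != 0) {
        return std::nullopt;            // unknown session: null AIDL return
    }
    return std::vector<int32_t>{1000};  // known session: its (possibly empty) uid list
}

bool fetchClientUids(int32_t sessionId, std::vector<int32_t>* clientUids) {
    std::optional<std::vector<int32_t>> uids = getClientUidsFor(sessionId);
    if (!uids.has_value()) {
        return false;                   // mirrors the "not found" case checked above
    }
    *clientUids = *uids;
    return true;
}
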
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
index 0550d77..e9eebe2 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
@@ -33,7 +33,7 @@
 
 namespace media {
 
-constexpr int64_t kPaddingUs = 400000;
+constexpr int64_t kPaddingUs = 1000000;
 constexpr int64_t kSessionWithPaddingUs = 10000000 + kPaddingUs;
 constexpr int32_t kBitRate = 8 * 1000 * 1000;  // 8Mbps
 
@@ -56,8 +56,7 @@
     registerMultipleClients();
 
     const char* srcPath = "bad_file_uri";
-    const char* dstPath = OUTPATH(TestInvalidSource);
-    deleteFile(dstPath);
+    const char* dstPath = prepareOutputFile(OUTPATH(TestInvalidSource));
 
     // Submit one session.
     EXPECT_TRUE(
@@ -73,8 +72,7 @@
 TEST_F(MediaTranscodingServiceRealTest, TestPassthru) {
     registerMultipleClients();
 
-    const char* dstPath = OUTPATH(TestPassthru);
-    deleteFile(dstPath);
+    const char* dstPath = prepareOutputFile(OUTPATH(TestPassthru));
 
     // Submit one session.
     EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath));
@@ -89,8 +87,7 @@
 TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideo) {
     registerMultipleClients();
 
-    const char* dstPath = OUTPATH(TestTranscodeVideo);
-    deleteFile(dstPath);
+    const char* dstPath = prepareOutputFile(OUTPATH(TestTranscodeVideo));
 
     // Submit one session.
     EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath, TranscodingSessionPriority::kNormal,
@@ -106,8 +103,7 @@
 TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideoProgress) {
     registerMultipleClients();
 
-    const char* dstPath = OUTPATH(TestTranscodeVideoProgress);
-    deleteFile(dstPath);
+    const char* dstPath = prepareOutputFile(OUTPATH(TestTranscodeVideoProgress));
 
     // Submit one session.
     EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath, TranscodingSessionPriority::kNormal,
@@ -134,11 +130,9 @@
 
     const char* srcPath0 = kLongSrcPath;
     const char* srcPath1 = kShortSrcPath;
-    const char* dstPath0 = OUTPATH(TestCancelImmediately_Session0);
-    const char* dstPath1 = OUTPATH(TestCancelImmediately_Session1);
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestCancelImmediately_Session0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestCancelImmediately_Session1));
 
-    deleteFile(dstPath0);
-    deleteFile(dstPath1);
     // Submit one session, should start immediately.
     EXPECT_TRUE(
             mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
@@ -166,11 +160,9 @@
 
     const char* srcPath0 = kLongSrcPath;
     const char* srcPath1 = kShortSrcPath;
-    const char* dstPath0 = OUTPATH(TestCancelWhileRunning_Session0);
-    const char* dstPath1 = OUTPATH(TestCancelWhileRunning_Session1);
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestCancelWhileRunning_Session0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestCancelWhileRunning_Session1));
 
-    deleteFile(dstPath0);
-    deleteFile(dstPath1);
     // Submit two sessions, session 0 should start immediately, session 1 should be queued.
     EXPECT_TRUE(
             mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
@@ -197,10 +189,8 @@
 
     const char* srcPath0 = kLongSrcPath;
     const char* srcPath1 = kShortSrcPath;
-    const char* dstPath0 = OUTPATH(TestPauseResumeSingleClient_Session0);
-    const char* dstPath1 = OUTPATH(TestPauseResumeSingleClient_Session1);
-    deleteFile(dstPath0);
-    deleteFile(dstPath1);
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestPauseResumeSingleClient_Session0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestPauseResumeSingleClient_Session1));
 
     // Submit one offline session, should start immediately.
     EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kUnspecified,
@@ -244,20 +234,15 @@
 TEST_F(MediaTranscodingServiceRealTest, TestPauseResumeMultiClients) {
     ALOGD("TestPauseResumeMultiClients starting...");
 
-    EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
-    EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
-    EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
-    EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
-    EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+    dismissKeyguard();
+    stopAppPackages();
 
     registerMultipleClients();
 
     const char* srcPath0 = kLongSrcPath;
     const char* srcPath1 = kShortSrcPath;
-    const char* dstPath0 = OUTPATH(TestPauseResumeMultiClients_Client0);
-    const char* dstPath1 = OUTPATH(TestPauseResumeMultiClients_Client1);
-    deleteFile(dstPath0);
-    deleteFile(dstPath1);
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestPauseResumeMultiClients_Client0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestPauseResumeMultiClients_Client1));
 
     ALOGD("Moving app A to top...");
     EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
@@ -294,12 +279,177 @@
 
     unregisterMultipleClients();
 
-    EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
-    EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
-    EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+    stopAppPackages();
 
     ALOGD("TestPauseResumeMultiClients finished.");
 }
 
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneForeground) {
+    ALOGD("TestUidGoneForeground starting...");
+
+    dismissKeyguard();
+    stopAppPackages();
+
+    registerMultipleClients();
+
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+    // Test kill foreground app, using only 1 uid.
+    ALOGD("Moving app A to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+    // Submit sessions to Client1 (app A).
+    ALOGD("Submitting sessions to client1 (app A) ...");
+    EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+    EXPECT_TRUE(mClient1->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::NoEvent);
+
+    // Kill app A; both of its sessions should be cancelled with error code
+    // kUidGoneCancelled.
+    EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+    EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 0));
+    EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+    EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 1));
+    EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+    unregisterMultipleClients();
+
+    stopAppPackages();
+
+    ALOGD("TestUidGoneForeground finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneForegroundMultiUids) {
+    ALOGD("TestUidGoneForegroundMultiUids starting...");
+
+    dismissKeyguard();
+    stopAppPackages();
+
+    registerMultipleClients();
+
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+    // Test kill foreground app, using two uids.
+    ALOGD("Moving app B to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+    EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+    EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+    // Have app A also request session 1.
+    EXPECT_TRUE(mClient2->addClientUid(1, mClient1->mClientUid));
+
+    ALOGD("Moving app A to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 1));
+
+    // Kill app A, CLIENT(2)'s session 1 should continue because it's also requested by app B.
+    EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+
+    // Kill app B, sessions should be cancelled.
+    EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+    EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 1));
+    EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+    unregisterMultipleClients();
+
+    stopAppPackages();
+
+    ALOGD("TestUidGoneForegroundMultiUids finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneBackground) {
+    ALOGD("TestUidGoneBackground starting...");
+
+    dismissKeyguard();
+    stopAppPackages();
+
+    registerMultipleClients();
+
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+    // Test kill background app, using two uids.
+    ALOGD("Moving app B to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+    EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+    EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+
+    ALOGD("Moving app A to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+    EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+    EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+    // Kill app B, all its sessions should be cancelled.
+    EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+    EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 1));
+    EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+    unregisterMultipleClients();
+
+    stopAppPackages();
+
+    ALOGD("TestUidGoneBackground finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneBackgroundMultiUids) {
+    ALOGD("TestUidGoneBackgroundMultiUids starting...");
+
+    dismissKeyguard();
+    stopAppPackages();
+
+    registerMultipleClients();
+
+    const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+    const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+    // Test kill background app, using two uids.
+    ALOGD("Moving app B to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+    EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+    EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+                                 kBitRate));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+    // Have app A also request session 1.
+    EXPECT_TRUE(mClient2->addClientUid(1, mClient1->mClientUid));
+
+    ALOGD("Moving app A to top...");
+    EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 1));
+
+    // Kill app B. CLIENT(2)'s session 1 should continue to run (app A also
+    // requests it), while session 0 should be cancelled.
+    EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+    EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+    EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+    unregisterMultipleClients();
+
+    stopAppPackages();
+
+    ALOGD("TestUidGoneBackgroundMultiUids finished.");
+}
+
 }  // namespace media
 }  // namespace android
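
These tests drive real transcoding sessions and poll callback events with a timeout padding (kPaddingUs, raised to one second above) so slower devices do not flake. A minimal sketch of a pop-with-timeout event queue of the kind the EventTracker comparisons rely on; the real helper may differ:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <optional>
#include <string>

class EventQueue {
public:
    void push(std::string event) {
        {
            std::lock_guard<std::mutex> _l(mLock);
            mEvents.push_back(std::move(event));
        }
        mCondition.notify_one();
    }

    // Returns the next event, or std::nullopt if none arrives within timeoutUs.
    std::optional<std::string> pop(int64_t timeoutUs) {
        std::unique_lock<std::mutex> _l(mLock);
        if (!mCondition.wait_for(_l, std::chrono::microseconds(timeoutUs),
                                 [this] { return !mEvents.empty(); })) {
            return std::nullopt;  // timed out: the caller treats this as "NoEvent"
        }
        std::string event = std::move(mEvents.front());
        mEvents.pop_front();
        return event;
    }

private:
    std::mutex mLock;
    std::condition_variable mCondition;
    std::deque<std::string> mEvents;
};
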
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 501e8c0..0d453cf 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -111,7 +111,7 @@
     if (!endpoint->isConnected()) {
         ALOGD("%s() call safeReleaseCloseFromCallback()", __func__);
         // Release and close under a lock with no check for callback collisions.
-        endpoint->getStreamInternal()->safeReleaseCloseFromCallback();
+        endpoint->getStreamInternal()->safeReleaseCloseInternal();
     }
 
     return result;
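
The endpoint is already on the data-callback path here, so it now calls the variant that releases and closes under the lock without the callback-collision check. One plausible shape of those two entry points, purely illustrative (the real AAudio stream methods may differ):

// Sketch only: StreamSketch is hypothetical and stands in for the service stream.
#include <mutex>
#include <thread>

class StreamSketch {
public:
    void safeReleaseClose() {
        if (std::this_thread::get_id() == mCallbackThreadId) {
            return;  // refusing here avoids tearing down the thread we are running on
        }
        safeReleaseCloseInternal();
    }

    void safeReleaseCloseInternal() {
        std::lock_guard<std::mutex> _l(mLock);  // no callback-collision check here
        releaseLocked();
        closeLocked();
    }

private:
    void releaseLocked() {}
    void closeLocked() {}

    std::mutex mLock;
    std::thread::id mCallbackThreadId{};
};
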
diff --git a/services/tuner/TunerDescrambler.cpp b/services/tuner/TunerDescrambler.cpp
index 16338db..bdf826c 100644
--- a/services/tuner/TunerDescrambler.cpp
+++ b/services/tuner/TunerDescrambler.cpp
@@ -67,8 +67,9 @@
         return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->addPid(getHidlDemuxPid(pid),
-            static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter());
+    sp<IFilter> halFilter = (optionalSourceFilter == NULL)
+            ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
+    Result res = mDescrambler->addPid(getHidlDemuxPid(pid), halFilter);
     if (res != Result::SUCCESS) {
         return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
     }
@@ -82,8 +83,9 @@
         return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->removePid(getHidlDemuxPid(pid),
-            static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter());
+    sp<IFilter> halFilter = (optionalSourceFilter == NULL)
+            ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
+    Result res = mDescrambler->removePid(getHidlDemuxPid(pid), halFilter);
     if (res != Result::SUCCESS) {
         return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
     }
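
Both addPid() and removePid() now repeat the same null guard before dereferencing the optional source filter. A hypothetical helper that captures it, written as a template because the concrete TunerFilter/IFilter types are not shown in these hunks:

// Hypothetical helper, not part of the patch; parameter and return types are
// assumed from the surrounding hunks and supplied as template arguments.
template <typename HalFilterPtr, typename TunerFilterT, typename SourcePtr>
HalFilterPtr halFilterOrNull(const SourcePtr& optionalSourceFilter) {
    return (optionalSourceFilter == nullptr)
            ? HalFilterPtr(nullptr)
            : HalFilterPtr(static_cast<TunerFilterT*>(
                      optionalSourceFilter.get())->getHalFilter());
}

// Usage in addPid()/removePid() would then look like:
//   sp<IFilter> halFilter = halFilterOrNull<sp<IFilter>, TunerFilter>(optionalSourceFilter);
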