Merge "Support float data on aaudio legacy record fast path." into tm-dev
diff --git a/camera/CameraSessionStats.cpp b/camera/CameraSessionStats.cpp
index 2a07ffc..05341bf 100644
--- a/camera/CameraSessionStats.cpp
+++ b/camera/CameraSessionStats.cpp
@@ -52,6 +52,12 @@
         return err;
     }
 
+    float maxPreviewFps = 0;
+    if ((err = parcel->readFloat(&maxPreviewFps)) != OK) {
+        ALOGE("%s: Failed to read maxPreviewFps from parcel", __FUNCTION__);
+        return err;
+    }
+
     int dataSpace = 0;
     if ((err = parcel->readInt32(&dataSpace)) != OK) {
         ALOGE("%s: Failed to read dataSpace from parcel", __FUNCTION__);
@@ -112,14 +118,14 @@
         return err;
     }
 
-    int dynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
-    if ((err = parcel->readInt32(&dynamicRangeProfile)) != OK) {
+    int64_t dynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+    if ((err = parcel->readInt64(&dynamicRangeProfile)) != OK) {
         ALOGE("%s: Failed to read dynamic range profile type from parcel", __FUNCTION__);
         return err;
     }
 
-    int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
-    if ((err = parcel->readInt32(&streamUseCase)) != OK) {
+    int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+    if ((err = parcel->readInt64(&streamUseCase)) != OK) {
         ALOGE("%s: Failed to read stream use case from parcel", __FUNCTION__);
         return err;
     }
@@ -127,6 +133,7 @@
     mWidth = width;
     mHeight = height;
     mFormat = format;
+    mMaxPreviewFps = maxPreviewFps;
     mDataSpace = dataSpace;
     mUsage = usage;
     mRequestCount = requestCount;
@@ -166,6 +173,11 @@
         return err;
     }
 
+    if ((err = parcel->writeFloat(mMaxPreviewFps)) != OK) {
+        ALOGE("%s: Failed to write stream maxPreviewFps!", __FUNCTION__);
+        return err;
+    }
+
     if ((err = parcel->writeInt32(mDataSpace)) != OK) {
         ALOGE("%s: Failed to write stream dataSpace!", __FUNCTION__);
         return err;
@@ -216,12 +228,12 @@
         return err;
     }
 
-    if ((err = parcel->writeInt32(mDynamicRangeProfile)) != OK) {
+    if ((err = parcel->writeInt64(mDynamicRangeProfile)) != OK) {
         ALOGE("%s: Failed to write dynamic range profile type", __FUNCTION__);
         return err;
     }
 
-    if ((err = parcel->writeInt32(mStreamUseCase)) != OK) {
+    if ((err = parcel->writeInt64(mStreamUseCase)) != OK) {
         ALOGE("%s: Failed to write stream use case!", __FUNCTION__);
         return err;
     }
@@ -247,6 +259,7 @@
         mApiLevel(0),
         mIsNdk(false),
         mLatencyMs(-1),
+        mMaxPreviewFps(0),
         mSessionType(0),
         mInternalReconfigure(0),
         mRequestCount(0),
@@ -263,6 +276,7 @@
                 mApiLevel(apiLevel),
                 mIsNdk(isNdk),
                 mLatencyMs(latencyMs),
+                mMaxPreviewFps(0),
                 mSessionType(0),
                 mInternalReconfigure(0),
                 mRequestCount(0),
@@ -319,6 +333,12 @@
         return err;
     }
 
+    float maxPreviewFps;
+    if ((err = parcel->readFloat(&maxPreviewFps)) != OK) {
+        ALOGE("%s: Failed to read maxPreviewFps from parcel", __FUNCTION__);
+        return err;
+    }
+
     int32_t sessionType;
     if ((err = parcel->readInt32(&sessionType)) != OK) {
         ALOGE("%s: Failed to read session type from parcel", __FUNCTION__);
@@ -362,6 +382,7 @@
     mApiLevel = apiLevel;
     mIsNdk = isNdk;
     mLatencyMs = latencyMs;
+    mMaxPreviewFps = maxPreviewFps;
     mSessionType = sessionType;
     mInternalReconfigure = internalReconfigure;
     mRequestCount = requestCount;
@@ -415,6 +436,11 @@
         return err;
     }
 
+    if ((err = parcel->writeFloat(mMaxPreviewFps)) != OK) {
+        ALOGE("%s: Failed to write maxPreviewFps!", __FUNCTION__);
+        return err;
+    }
+
     if ((err = parcel->writeInt32(mSessionType)) != OK) {
         ALOGE("%s: Failed to write session type!", __FUNCTION__);
         return err;
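
Note: the int32 -> int64 widening of dynamicRangeProfile and streamUseCase above only stays
compatible because readFromParcel and writeToParcel change together, with the same field order
and widths on both sides. A minimal sketch of that symmetric contract, using libbinder's Parcel
calls but a hypothetical struct rather than the real CameraStreamStats:

    #include <binder/Parcel.h>
    #include <utils/Errors.h>

    // Illustration only: reader and writer must agree on width and ordering,
    // otherwise every field after the mismatch is misread.
    struct StreamInfoSketch {
        float   maxPreviewFps = 0.f;
        int64_t streamUseCase = 0;

        android::status_t writeToParcel(android::Parcel* p) const {
            android::status_t err;
            if ((err = p->writeFloat(maxPreviewFps)) != android::OK) return err;
            return p->writeInt64(streamUseCase);   // must match readInt64() below
        }
        android::status_t readFromParcel(const android::Parcel* p) {
            android::status_t err;
            if ((err = p->readFloat(&maxPreviewFps)) != android::OK) return err;
            return p->readInt64(&streamUseCase);   // readInt32() here would corrupt later fields
        }
    };
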
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 5b8da34..11d4960 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -81,7 +81,7 @@
     return mDynamicRangeProfile;
 }
 
-int OutputConfiguration::getStreamUseCase() const {
+int64_t OutputConfiguration::getStreamUseCase() const {
     return mStreamUseCase;
 }
 
@@ -192,8 +192,8 @@
         return err;
     }
 
-    int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
-    if ((err = parcel->readInt32(&streamUseCase)) != OK) {
+    int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+    if ((err = parcel->readInt64(&streamUseCase)) != OK) {
         ALOGE("%s: Failed to read stream use case from parcel", __FUNCTION__);
         return err;
     }
@@ -232,8 +232,8 @@
     mDynamicRangeProfile = dynamicProfile;
 
     ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
-          " physicalCameraId = %s, isMultiResolution = %d, streamUseCase = %d, timestampBase = %d,"
-          " mirrorMode = %d",
+          " physicalCameraId = %s, isMultiResolution = %d, streamUseCase = %" PRId64
+          ", timestampBase = %d, mirrorMode = %d",
           __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType,
           String8(mPhysicalCameraId).string(), mIsMultiResolution, mStreamUseCase, timestampBase,
           mMirrorMode);
@@ -317,7 +317,7 @@
     err = parcel->writeInt64(mDynamicRangeProfile);
     if (err != OK) return err;
 
-    err = parcel->writeInt32(mStreamUseCase);
+    err = parcel->writeInt64(mStreamUseCase);
     if (err != OK) return err;
 
     err = parcel->writeInt32(mTimestampBase);
diff --git a/camera/include/camera/CameraSessionStats.h b/camera/include/camera/CameraSessionStats.h
index 26dc70c..15f5622 100644
--- a/camera/include/camera/CameraSessionStats.h
+++ b/camera/include/camera/CameraSessionStats.h
@@ -37,6 +37,7 @@
     int mWidth;
     int mHeight;
     int mFormat;
+    float mMaxPreviewFps;
     int mDataSpace;
     int64_t mUsage;
 
@@ -65,20 +66,20 @@
     // Dynamic range profile
     int64_t mDynamicRangeProfile;
     // Stream use case
-    int mStreamUseCase;
+    int64_t mStreamUseCase;
 
     CameraStreamStats() :
-            mWidth(0), mHeight(0), mFormat(0), mDataSpace(0), mUsage(0),
+            mWidth(0), mHeight(0), mFormat(0), mMaxPreviewFps(0), mDataSpace(0), mUsage(0),
             mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
             mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
             mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
             mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {}
-    CameraStreamStats(int width, int height, int format, int dataSpace, int64_t usage,
-            int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile,
+    CameraStreamStats(int width, int height, int format, float maxPreviewFps, int dataSpace,
+            int64_t usage, int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile,
             int streamUseCase)
-            : mWidth(width), mHeight(height), mFormat(format), mDataSpace(dataSpace),
-              mUsage(usage), mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
-              mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
+            : mWidth(width), mHeight(height), mFormat(format), mMaxPreviewFps(maxPreviewFps),
+              mDataSpace(dataSpace), mUsage(usage), mRequestCount(0), mErrorCount(0),
+              mStartLatencyMs(0), mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
               mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
               mDynamicRangeProfile(dynamicRangeProfile),
               mStreamUseCase(streamUseCase) {}
@@ -123,6 +124,7 @@
     bool mIsNdk;
     // latency in ms for camera open, close, or session creation.
     int mLatencyMs;
+    float mMaxPreviewFps;
 
     // Session info and statistics
     int mSessionType;
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index 6b0f333..b842885 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -63,7 +63,7 @@
     bool                       isShared() const;
     String16                   getPhysicalCameraId() const;
     bool                       isMultiResolution() const;
-    int                        getStreamUseCase() const;
+    int64_t                    getStreamUseCase() const;
     int                        getTimestampBase() const;
     int                        getMirrorMode() const;
 
@@ -185,7 +185,7 @@
     bool                       mIsMultiResolution;
     std::vector<int32_t>       mSensorPixelModesUsed;
     int64_t                    mDynamicRangeProfile;
-    int                        mStreamUseCase;
+    int64_t                    mStreamUseCase;
     int                        mTimestampBase;
     int                        mMirrorMode;
 };
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4c492f0..4891034 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -4216,7 +4216,7 @@
     /**
      * <p>The stream use cases supported by this camera device.</p>
      *
-     * <p>Type: int32[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)</p>
+     * <p>Type: int64[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)</p>
      *
      * <p>This tag may appear in:
      * <ul>
@@ -4260,7 +4260,7 @@
      * reprocessable session, constrained high speed session, or RAW stream combinations, the
      * application should leave stream use cases within the session as DEFAULT.</p>
      */
-    ACAMERA_SCALER_AVAILABLE_STREAM_USE_CASES =                 // int32[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)
+    ACAMERA_SCALER_AVAILABLE_STREAM_USE_CASES =                 // int64[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)
             ACAMERA_SCALER_START + 25,
     ACAMERA_SCALER_END,
 
diff --git a/drm/libmediadrm/CryptoHalAidl.cpp b/drm/libmediadrm/CryptoHalAidl.cpp
index 3dc62e9..bda664a 100644
--- a/drm/libmediadrm/CryptoHalAidl.cpp
+++ b/drm/libmediadrm/CryptoHalAidl.cpp
@@ -353,7 +353,9 @@
 
     err = statusAidlToStatusT(statusAidl);
     std::string msgStr(statusAidl.getMessage());
-    *errorDetailMsg = toString8(msgStr);
+    if (errorDetailMsg != nullptr) {
+        *errorDetailMsg = toString8(msgStr);
+    }
     if (err != OK) {
         ALOGE("Failed on decrypt, error description:%s", statusAidl.getDescription().c_str());
         return err;
@@ -415,4 +417,4 @@
 
     return DrmUtils::GetLogMessagesAidl<ICryptoPluginAidl>(mPlugin, logs);
 }
-}  // namespace android
\ No newline at end of file
+}  // namespace android
diff --git a/drm/libmediadrm/CryptoHalHidl.cpp b/drm/libmediadrm/CryptoHalHidl.cpp
index cbb6ddf..a290704 100644
--- a/drm/libmediadrm/CryptoHalHidl.cpp
+++ b/drm/libmediadrm/CryptoHalHidl.cpp
@@ -342,7 +342,9 @@
                 [&](Status_V1_2 status, uint32_t hBytesWritten, hidl_string hDetailedError) {
                     if (status == Status_V1_2::OK) {
                         bytesWritten = hBytesWritten;
-                        *errorDetailMsg = toString8(hDetailedError);
+                        if (errorDetailMsg != nullptr) {
+                            *errorDetailMsg = toString8(hDetailedError);
+                        }
                     }
                     err = toStatusT(status);
                 });
@@ -353,7 +355,9 @@
                 [&](Status status, uint32_t hBytesWritten, hidl_string hDetailedError) {
                     if (status == Status::OK) {
                         bytesWritten = hBytesWritten;
-                        *errorDetailMsg = toString8(hDetailedError);
+                        if (errorDetailMsg != nullptr) {
+                            *errorDetailMsg = toString8(hDetailedError);
+                        }
                     }
                     err = toStatusT(status);
                 });
diff --git a/drm/libmediadrm/DrmUtils.cpp b/drm/libmediadrm/DrmUtils.cpp
index 731755b..be0cd4b 100644
--- a/drm/libmediadrm/DrmUtils.cpp
+++ b/drm/libmediadrm/DrmUtils.cpp
@@ -177,7 +177,7 @@
         [](const char* instance, void* context) {
             auto fullName = std::string(IDrmFactoryAidl::descriptor) + "/" + std::string(instance);
             auto factory = IDrmFactoryAidl::fromBinder(
-                    ::ndk::SpAIBinder(AServiceManager_getService(fullName.c_str())));
+                    ::ndk::SpAIBinder(AServiceManager_waitForService(fullName.c_str())));
             if (factory == nullptr) {
                 ALOGE("not found IDrmFactory. Instance name:[%s]", fullName.c_str());
                 return;
diff --git a/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp b/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
index eabd41f..597b72d 100644
--- a/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
+++ b/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
@@ -20,6 +20,7 @@
 
 #include <binder/MemoryDealer.h>
 #include <hidlmemory/FrameworkUtils.h>
+#include <media/stagefright/foundation/AString.h>
 #include <mediadrm/CryptoHal.h>
 #include <mediadrm/DrmHal.h>
 #include <utils/String8.h>
@@ -401,7 +402,7 @@
         .secureMemory = nullptr};
 
     const uint64_t offset = 0;
-    AString *errorDetailMsg = nullptr;
+    AString errorDetailMsg;
     CryptoPlugin::Mode mode;
     bool shouldPassRandomCryptoMode = mFuzzedDataProvider->ConsumeBool();
     if (shouldPassRandomCryptoMode) {
@@ -411,7 +412,7 @@
             kCryptoMode[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, kNumCryptoMode - 1)];
     }
     mCrypto->decrypt(keyId, iv, mode, pattern, sourceBuffer, offset, subSamples, numSubSamples,
-                     destBuffer, errorDetailMsg);
+                     destBuffer, &errorDetailMsg);
 
     if (heapSeqNum >= 0) {
         mCrypto->unsetHeap(heapSeqNum);
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 66fe488..980ce55 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -31,6 +31,7 @@
 #include <chrono>
 #include <cstddef>
 #include <cstdint>
+#include <cstring>
 #include <ctime>
 #include <deque>
 #include <endian.h>
@@ -100,7 +101,8 @@
 
 template <typename... Args>
 void LogToBuffer(android_LogPriority level, const uint8_t uuid[16], const char *fmt, Args... args) {
-    const uint64_t* uuid2 = reinterpret_cast<const uint64_t*>(uuid);
+    uint64_t uuid2[2] = {};
+    std::memcpy(uuid2, uuid, sizeof(uuid2));
     std::string uuidFmt("uuid=[%lx %lx] ");
     uuidFmt += fmt;
     LogToBuffer(level, uuidFmt.c_str(), htobe64(uuid2[0]), htobe64(uuid2[1]), args...);
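
Note: the memcpy above replaces a reinterpret_cast of the raw uuid bytes to const uint64_t*,
which is an aliasing violation and undefined behavior when the byte array is not 8-byte aligned.
A standalone sketch of the same technique, independent of DrmUtils:

    #include <cstdint>
    #include <cstring>

    // Copy the 16 uuid bytes into two 64-bit words. memcpy is well defined for
    // any alignment, and compilers lower it to plain loads when alignment allows.
    inline void splitUuid(const uint8_t uuid[16], uint64_t out[2]) {
        std::memcpy(out, uuid, 2 * sizeof(uint64_t));
    }
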
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index fd4ef95..9919e90 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,19 +1,10 @@
 {
   "presubmit": [
     {
-      "name": "CtsMediaDrmTestCases",
+      "name": "CtsMediaDrmFrameworkTestCases",
       "options" : [
         {
           "include-annotation": "android.platform.test.annotations.Presubmit"
-        },
-        {
-          "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
-        },
-        {
-          "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
-        },
-        {
-          "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
         }
       ]
     }
diff --git a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
index bae55d0..ea51e9d 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
@@ -83,7 +83,7 @@
 void DrmPlugin::initProperties() {
     mStringProperties.clear();
     mStringProperties[kVendorKey] = kAidlVendorValue;
-    mStringProperties[kVersionKey] = kVersionValue;
+    mStringProperties[kVersionKey] = kAidlVersionValue;
     mStringProperties[kPluginDescriptionKey] = kAidlPluginDescriptionValue;
     mStringProperties[kAlgorithmsKey] = kAidlAlgorithmsValue;
     mStringProperties[kListenerTestSupportKey] = kAidlListenerTestSupportValue;
@@ -380,7 +380,7 @@
     } else if (name == kDrmErrorTestKey) {
         value = mStringProperties[kDrmErrorTestKey];
     } else if (name == kAidlVersionKey) {
-        value = mStringProperties[kAidlVersionValue];
+        value = mStringProperties[kAidlVersionKey];
     } else {
         ALOGE("App requested unknown string property %s", name.c_str());
         status = Status::ERROR_DRM_CANNOT_HANDLE;
diff --git a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
index 019c726..c87aabc 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
@@ -1,9 +1,9 @@
-service vendor.drm-clearkey-service /vendor/bin/hw/android.hardware.drm-service.clearkey
+service vendor.drm-clearkey-service /vendor/bin/hw/android.hardware.drm-service-lazy.clearkey
+    oneshot
     disabled
     class hal
     user media
     group mediadrm drmrpc
     ioprio rt 4
     task_profiles ProcessCapacityHigh
-    interface aidl android.hardware.drm.IDrmFactory/clearkey
-    interface aidl android.hardware.drm.ICryptoFactory/clearkey
+    interface aidl android.hardware.drm.IDrmFactory/clearkey
\ No newline at end of file
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h b/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h
index 8038108..fb2cceb 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h
@@ -19,7 +19,7 @@
 
 namespace clearkeydrm {
 static const std::string kAidlVendorValue("Google");
-static const std::string kAidlVersionValue("1.0");
+static const std::string kAidlVersionValue("aidl-1");
 static const std::string kAidlPluginDescriptionValue("ClearKey CDM");
 static const std::string kAidlAlgorithmsValue("");
 static const std::string kAidlListenerTestSupportValue("true");
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 16e794a..d4025e5 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -38,13 +38,13 @@
     VideoFrame(uint32_t width, uint32_t height,
             uint32_t displayWidth, uint32_t displayHeight,
             uint32_t tileWidth, uint32_t tileHeight,
-            uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
+            uint32_t angle, uint32_t bpp, uint32_t bitDepth, bool hasData, size_t iccSize):
         mWidth(width), mHeight(height),
         mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
         mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
         mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
         mSize(hasData ? (bpp * width * height) : 0),
-        mIccSize(iccSize), mReserved(0) {
+        mIccSize(iccSize), mBitDepth(bitDepth) {
     }
 
     void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
@@ -84,7 +84,9 @@
     uint32_t mRowBytes;        // Number of bytes per row before rotation
     uint32_t mSize;            // Number of bytes of frame data
     uint32_t mIccSize;         // Number of bytes of ICC data
-    uint32_t mReserved;        // (padding to make mData 64-bit aligned)
+    uint32_t mBitDepth;        // number of bits per R / G / B channel
+
+    // Adding new items must be 64-bit aligned.
 };
 
 }; // namespace android
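
Note: mBitDepth takes over the 32-bit slot that mReserved previously used as padding, so the
flattened header keeps its size and the frame data appended after it stays 64-bit aligned, which
is what the new comment warns future fields about. A hedged sketch of how that invariant could be
checked (hypothetical struct, not the real VideoFrame):

    #include <cstdint>

    // Illustration only: the header must remain a multiple of 8 bytes so that
    // data appended directly after it stays 64-bit aligned.
    struct FrameHeaderSketch {
        uint32_t mIccSize;
        uint32_t mBitDepth;  // occupies the former mReserved padding slot
    };
    static_assert(sizeof(FrameHeaderSketch) % 8 == 0,
                  "new fields must keep the trailing frame data 64-bit aligned");
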
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index def8a18..2b9ec7d 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -212,9 +212,8 @@
                 (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
                 &usage, sizeof(usage));
 
-        mSource->configure(
-                mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace));
-        return OK;
+        return GetStatus(mSource->configure(
+                mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace)));
     }
 
     void disconnect() override {
diff --git a/media/janitors/media_leads_OWNERS b/media/janitors/media_leads_OWNERS
new file mode 100644
index 0000000..b7dbdee
--- /dev/null
+++ b/media/janitors/media_leads_OWNERS
@@ -0,0 +1,9 @@
+# gerrit owner/approvers corresponding to the TLs within the media team
+# loosely (as of 2022/3) fgoldfain@ and direct reports
+arifdikici@google.com
+elaurent@google.com
+fgoldfain@google.com    #{LAST_RESORT_SUGGESTION}
+lajos@google.com
+nchalko@google.com
+olly@google.com
+robertshih@google.com
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 041b427..1b8656d 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -47,6 +47,7 @@
     info->mRotationAngle = videoFrame->mRotationAngle;
     info->mBytesPerPixel = videoFrame->mBytesPerPixel;
     info->mDurationUs = videoFrame->mDurationUs;
+    info->mBitDepth = videoFrame->mBitDepth;
     if (videoFrame->mIccSize > 0) {
         info->mIccData.assign(
                 videoFrame->getFlattenedIccData(),
@@ -377,13 +378,14 @@
         //       issue (e.g. by copying).
         VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->unsecurePointer());
 
-        ALOGV("Image dimension %dx%d, display %dx%d, angle %d, iccSize %d",
+        ALOGV("Image dimension %dx%d, display %dx%d, angle %d, iccSize %d, bitDepth %d",
                 videoFrame->mWidth,
                 videoFrame->mHeight,
                 videoFrame->mDisplayWidth,
                 videoFrame->mDisplayHeight,
                 videoFrame->mRotationAngle,
-                videoFrame->mIccSize);
+                videoFrame->mIccSize,
+                videoFrame->mBitDepth);
 
         initFrameInfo(&mImageInfo, videoFrame);
 
@@ -729,4 +731,13 @@
     return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
 }
 
+uint32_t HeifDecoderImpl::getColorDepth() {
+    HeifFrameInfo* info = &mImageInfo;
+    if (info != nullptr) {
+        return mImageInfo.mBitDepth;
+    }
+
+    return 0;
+}
+
 } // namespace android
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index 2b9c710..86a8628 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -54,6 +54,8 @@
 
     size_t skipScanlines(size_t count) override;
 
+    uint32_t getColorDepth() override;
+
 private:
     struct DecodeThread;
 
diff --git a/media/libheif/include/HeifDecoderAPI.h b/media/libheif/include/HeifDecoderAPI.h
index fa51aef..dc12486 100644
--- a/media/libheif/include/HeifDecoderAPI.h
+++ b/media/libheif/include/HeifDecoderAPI.h
@@ -46,7 +46,8 @@
     uint32_t mHeight;
     int32_t  mRotationAngle;           // Rotation angle, clockwise, should be multiple of 90
     uint32_t mBytesPerPixel;           // Number of bytes for one pixel
-    int64_t mDurationUs;               // Duration of the frame in us
+    int64_t  mDurationUs;              // Duration of the frame in us
+    uint32_t mBitDepth;                // Number of bits for each of the R/G/B channels
     std::vector<uint8_t> mIccData;     // ICC data array
 };
 
@@ -162,6 +163,11 @@
      */
     virtual size_t skipScanlines(size_t count) = 0;
 
+    /*
+     * Returns color depth in bits for each of the R/G/B channels.
+     */
+    virtual uint32_t getColorDepth() = 0;
+
 private:
     HeifDecoder(const HeifFrameInfo&) = delete;
     HeifDecoder& operator=(const HeifFrameInfo&) = delete;
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 609298f..55b1ed7 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -28,6 +28,7 @@
 #include <datasource/PlayerServiceDataSourceFactory.h>
 #include <datasource/PlayerServiceFileSource.h>
 #include <media/IMediaHTTPService.h>
+#include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodecList.h>
@@ -194,17 +195,6 @@
         return NULL;
     }
 
-    if (metaOnly) {
-        return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
-    }
-
-    sp<IMediaSource> source = mExtractor->getTrack(i);
-
-    if (source.get() == NULL) {
-        ALOGE("unable to instantiate image track.");
-        return NULL;
-    }
-
     const char *mime;
     bool isHeif = false;
     if (!trackMeta->findCString(kKeyMIMEType, &mime)) {
@@ -223,16 +213,47 @@
         trackMeta->setCString(kKeyMIMEType, mime);
     }
 
-    bool preferhw = property_get_bool(
-            "media.stagefright.thumbnail.prefer_hw_codecs", false);
-    uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
-    Vector<AString> matchingCodecs;
     sp<AMessage> format = new AMessage;
     status_t err = convertMetaDataToMessage(trackMeta, &format);
     if (err != OK) {
         format = NULL;
     }
 
+    uint32_t bitDepth = 8;
+    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+        int32_t profile;
+        if (format->findInt32("profile", &profile)) {
+            if (HEVCProfileMain10 == profile || HEVCProfileMain10HDR10 == profile ||
+                    HEVCProfileMain10HDR10Plus == profile) {
+                  bitDepth = 10;
+            }
+        }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+        int32_t profile;
+        if (format->findInt32("profile", &profile)) {
+            if (AV1ProfileMain10 == profile || AV1ProfileMain10HDR10 == profile ||
+                    AV1ProfileMain10HDR10Plus == profile) {
+                  bitDepth = 10;
+            }
+        }
+    }
+
+    if (metaOnly) {
+        return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail, bitDepth);
+    }
+
+    sp<IMediaSource> source = mExtractor->getTrack(i);
+
+    if (source.get() == NULL) {
+        ALOGE("unable to instantiate image track.");
+        return NULL;
+    }
+
+    bool preferhw = property_get_bool(
+            "media.stagefright.thumbnail.prefer_hw_codecs", false);
+    uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
+    Vector<AString> matchingCodecs;
+
     // If decoding thumbnail check decoder supports thumbnail dimensions instead
     int32_t thumbHeight, thumbWidth;
     if (thumbnail && format != NULL
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 5da32c9..3df8766 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -50,7 +50,7 @@
 
 sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
         int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
-        int32_t dstBpp, bool allocRotated, bool metaOnly) {
+        int32_t dstBpp, uint32_t bitDepth, bool allocRotated, bool metaOnly) {
     int32_t rotationAngle;
     if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
         rotationAngle = 0;  // By default, no rotation
@@ -105,7 +105,7 @@
     }
 
     VideoFrame frame(width, height, displayWidth, displayHeight,
-            tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
+            tileWidth, tileHeight, rotationAngle, dstBpp, bitDepth, !metaOnly, iccSize);
 
     size_t size = frame.getFlattenedSize();
     sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
@@ -126,15 +126,15 @@
 
 sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
         int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
-        int32_t dstBpp, bool allocRotated = false) {
-    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+        int32_t dstBpp, uint8_t bitDepth, bool allocRotated = false) {
+    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth,
             allocRotated, false /*metaOnly*/);
 }
 
 sp<IMemory> allocMetaFrame(const sp<MetaData>& trackMeta,
         int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
-        int32_t dstBpp) {
-    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+        int32_t dstBpp, uint8_t bitDepth) {
+    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth,
             false /*allocRotated*/, true /*metaOnly*/);
 }
 
@@ -211,7 +211,7 @@
 
 //static
 sp<IMemory> FrameDecoder::getMetadataOnly(
-        const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
+        const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail, uint32_t bitDepth) {
     OMX_COLOR_FORMATTYPE dstFormat;
     ui::PixelFormat captureFormat;
     int32_t dstBpp;
@@ -235,7 +235,8 @@
         }
     }
 
-    sp<IMemory> metaMem = allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
+    sp<IMemory> metaMem =
+            allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth);
 
     // try to fill sequence meta's duration based on average frame rate,
     // default to 33ms if frame rate is unavailable.
@@ -534,7 +535,6 @@
     if (dstFormat() == COLOR_Format32bitABGR2101010) {
         videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
     } else {
-        // TODO: Use Flexible color instead
         videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
     }
 
@@ -649,6 +649,11 @@
         height = slice_height;
     }
 
+    uint32_t bitDepth = 8;
+    if (COLOR_FormatYUVP010 == srcFormat) {
+        bitDepth = 10;
+    }
+
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
                 trackMeta(),
@@ -657,6 +662,7 @@
                 0,
                 0,
                 dstBpp(),
+                bitDepth,
                 mCaptureLayer != nullptr /*allocRotated*/);
         if (frameMem == nullptr) {
             return NO_MEMORY;
@@ -851,7 +857,6 @@
     if (dstFormat() == COLOR_Format32bitABGR2101010) {
         videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
     } else {
-        // TODO: Use Flexible color instead
         videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
     }
 
@@ -908,7 +913,7 @@
         return ERROR_MALFORMED;
     }
 
-    int32_t width, height, stride;
+    int32_t width, height, stride, srcFormat;
     if (outputFormat->findInt32("width", &width) == false) {
         ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
         return ERROR_MALFORMED;
@@ -921,10 +926,19 @@
         ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
         return ERROR_MALFORMED;
     }
+    if (outputFormat->findInt32("color-format", &srcFormat) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived: color format is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
+
+    uint32_t bitDepth = 8;
+    if (COLOR_FormatYUVP010 == srcFormat) {
+        bitDepth = 10;
+    }
 
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
-                trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
+                trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp(), bitDepth);
 
         if (frameMem == nullptr) {
             return NO_MEMORY;
@@ -935,9 +949,6 @@
         setFrame(frameMem);
     }
 
-    int32_t srcFormat;
-    CHECK(outputFormat->findInt32("color-format", &srcFormat));
-
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
 
     uint32_t standard, range, transfer;
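
Note: both decoder paths above derive bit depth the same way: a COLOR_FormatYUVP010 source means
10 bits per channel, anything else is treated as 8. If more formats are added this could be
factored into a helper; a hypothetical sketch (not part of the change itself):

    #include <cstdint>
    #include <media/stagefright/MediaCodecConstants.h>

    namespace android {
    // Hypothetical helper: map the decoder output color format to the bit depth
    // stored in the VideoFrame header. Only P010 is treated as 10-bit here.
    static inline uint32_t bitDepthForColorFormat(int32_t colorFormat) {
        return (colorFormat == COLOR_FormatYUVP010) ? 10u : 8u;
    }
    }  // namespace android
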
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index f81a5eb..f9e6c7b 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -4482,45 +4482,48 @@
     size_t size;
     bool found =
             meta->findData(kKeyHdrStaticInfo, &type, reinterpret_cast<const void**>(&data), &size);
-    if (found && size == 25) {
-        uint16_t displayPrimariesRX = U16LE_AT(&data[1]);
-        uint16_t displayPrimariesRY = U16LE_AT(&data[3]);
-
-        uint16_t displayPrimariesGX = U16LE_AT(&data[5]);
-        uint16_t displayPrimariesGY = U16LE_AT(&data[7]);
-
-        uint16_t displayPrimariesBX = U16LE_AT(&data[9]);
-        uint16_t displayPrimariesBY = U16LE_AT(&data[11]);
-
-        uint16_t whitePointX = U16LE_AT(&data[13]);
-        uint16_t whitePointY = U16LE_AT(&data[15]);
-
-        uint16_t maxDisplayMasteringLuminance = U16LE_AT(&data[17]);
-        uint16_t minDisplayMasteringLuminance = U16LE_AT(&data[19]);
-
-        uint16_t maxContentLightLevel = U16LE_AT(&data[21]);
-        uint16_t maxPicAverageLightLevel = U16LE_AT(&data[23]);
-
-        mOwner->beginBox("mdcv");
-        mOwner->writeInt16(displayPrimariesGX);
-        mOwner->writeInt16(displayPrimariesGY);
-        mOwner->writeInt16(displayPrimariesBX);
-        mOwner->writeInt16(displayPrimariesBY);
-        mOwner->writeInt16(displayPrimariesRX);
-        mOwner->writeInt16(displayPrimariesRY);
-        mOwner->writeInt16(whitePointX);
-        mOwner->writeInt16(whitePointY);
-        mOwner->writeInt32(maxDisplayMasteringLuminance * 10000);
-        mOwner->writeInt32(minDisplayMasteringLuminance * 10000);
-        mOwner->endBox();  // mdcv.
-
-        mOwner->beginBox("clli");
-        mOwner->writeInt16(maxContentLightLevel);
-        mOwner->writeInt16(maxPicAverageLightLevel);
-        mOwner->endBox();  // clli.
-    } else {
-        ALOGW("Ignoring HDR static info with unexpected size %d", (int)size);
+    if (!found) {
+        return; // Nothing to encode.
     }
+    if (size != 25) {
+        ALOGW("Ignoring HDR static info with unexpected size %d", (int)size);
+        return;
+    }
+    uint16_t displayPrimariesRX = U16LE_AT(&data[1]);
+    uint16_t displayPrimariesRY = U16LE_AT(&data[3]);
+
+    uint16_t displayPrimariesGX = U16LE_AT(&data[5]);
+    uint16_t displayPrimariesGY = U16LE_AT(&data[7]);
+
+    uint16_t displayPrimariesBX = U16LE_AT(&data[9]);
+    uint16_t displayPrimariesBY = U16LE_AT(&data[11]);
+
+    uint16_t whitePointX = U16LE_AT(&data[13]);
+    uint16_t whitePointY = U16LE_AT(&data[15]);
+
+    uint16_t maxDisplayMasteringLuminance = U16LE_AT(&data[17]);
+    uint16_t minDisplayMasteringLuminance = U16LE_AT(&data[19]);
+
+    uint16_t maxContentLightLevel = U16LE_AT(&data[21]);
+    uint16_t maxPicAverageLightLevel = U16LE_AT(&data[23]);
+
+    mOwner->beginBox("mdcv");
+    mOwner->writeInt16(displayPrimariesGX);
+    mOwner->writeInt16(displayPrimariesGY);
+    mOwner->writeInt16(displayPrimariesBX);
+    mOwner->writeInt16(displayPrimariesBY);
+    mOwner->writeInt16(displayPrimariesRX);
+    mOwner->writeInt16(displayPrimariesRY);
+    mOwner->writeInt16(whitePointX);
+    mOwner->writeInt16(whitePointY);
+    mOwner->writeInt32(maxDisplayMasteringLuminance * 10000);
+    mOwner->writeInt32(minDisplayMasteringLuminance * 10000);
+    mOwner->endBox();  // mdcv.
+
+    mOwner->beginBox("clli");
+    mOwner->writeInt16(maxContentLightLevel);
+    mOwner->writeInt16(maxPicAverageLightLevel);
+    mOwner->endBox();  // clli.
 }
 
 void MPEG4Writer::Track::writeAudioFourCCBox() {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 1ec09ea..45d0ae7 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -3105,10 +3105,8 @@
                         case STOPPING:
                         {
                             if (mFlags & kFlagSawMediaServerDie) {
-                                bool postPendingReplies = true;
                                 if (mState == RELEASING && !mReplyID) {
                                     ALOGD("Releasing asynchronously, so nothing to reply here.");
-                                    postPendingReplies = false;
                                 }
                                 // MediaServer died, there definitely won't
                                 // be a shutdown complete notification after
@@ -3121,8 +3119,11 @@
                                 if (mState == RELEASING) {
                                     mComponentName.clear();
                                 }
-                                if (postPendingReplies) {
+                                if (mReplyID) {
                                     postPendingRepliesAndDeferredMessages(origin + ":dead");
+                                } else {
+                                    ALOGD("no pending replies: %s:dead following %s",
+                                          origin.c_str(), mLastReplyOrigin.c_str());
                                 }
                                 sendErrorResponse = false;
                             } else if (!mReplyID) {
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index d59e4f5..e417324 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -50,7 +50,8 @@
     sp<IMemory> extractFrame(FrameRect *rect = NULL);
 
     static sp<IMemory> getMetadataOnly(
-            const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
+            const sp<MetaData> &trackMeta, int colorFormat,
+            bool thumbnail = false, uint32_t bitDepth = 0);
 
 protected:
     virtual ~FrameDecoder();
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 84653eb..78792c5 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -816,7 +816,7 @@
 constexpr char KEY_OPERATING_RATE[] = "operating-rate";
 constexpr char KEY_OUTPUT_REORDER_DEPTH[] = "output-reorder-depth";
 constexpr char KEY_PCM_ENCODING[] = "pcm-encoding";
-constexpr char KEY_PICTURE_TYPE[] = "picture_type";
+constexpr char KEY_PICTURE_TYPE[] = "picture-type";
 constexpr char KEY_PIXEL_ASPECT_RATIO_HEIGHT[] = "sar-height";
 constexpr char KEY_PIXEL_ASPECT_RATIO_WIDTH[] = "sar-width";
 constexpr char KEY_PREPEND_HEADER_TO_SYNC_FRAMES[] = "prepend-sps-pps-to-idr-frames";
diff --git a/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp b/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
index ac1e9b1..a8e64b6 100644
--- a/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
+++ b/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
@@ -393,3 +393,51 @@
     std::this_thread::sleep_for(std::chrono::milliseconds(100));
     looper->stop();
 }
+
+TEST(MediaCodecTest, DeadWhileStoppingError) {
+    // Test scenario:
+    //
+    // 1) Client thread calls stop(); MediaCodec looper thread calls
+    //    initiateShutdown(); shutdown is being handled at the component thread.
+    // 2) An error occurs while handling initiateShutdown().
+    // 3) MediaCodec looper thread handles the error.
+    // 4) Codec service dies after the error is handled.
+    // 5) MediaCodec looper thread handles the death.
+
+    static const AString kCodecName{"test.codec"};
+    static const AString kCodecOwner{"nobody"};
+    static const AString kMediaType{"video/x-test"};
+
+    sp<MockCodec> mockCodec;
+    std::function<sp<CodecBase>(const AString &name, const char *owner)> getCodecBase =
+        [&mockCodec](const AString &, const char *) {
+            mockCodec = new MockCodec([](const std::shared_ptr<MockBufferChannel> &) {
+                // No mock setup, as we don't expect any buffer operations
+                // in this scenario.
+            });
+            ON_CALL(*mockCodec, initiateAllocateComponent(_))
+                .WillByDefault([mockCodec](const sp<AMessage> &) {
+                    mockCodec->callback()->onComponentAllocated(kCodecName.c_str());
+                });
+            ON_CALL(*mockCodec, initiateShutdown(_))
+                .WillByDefault([mockCodec](bool) {
+                    // 2)
+                    mockCodec->callback()->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+                    // 4)
+                    mockCodec->callback()->onError(DEAD_OBJECT, ACTION_CODE_FATAL);
+                    // Codec service has died, no callback.
+                });
+            return mockCodec;
+        };
+
+    sp<ALooper> looper{new ALooper};
+    sp<MediaCodec> codec = SetupMediaCodec(
+            kCodecOwner, kCodecName, kMediaType, looper, getCodecBase);
+    ASSERT_NE(nullptr, codec) << "Codec must not be null";
+    ASSERT_NE(nullptr, mockCodec) << "MockCodec must not be null";
+
+    codec->stop();
+    // sleep here so that the looper thread can handle the error
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    looper->stop();
+}
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index 537df76..edddaa4 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -33,7 +33,7 @@
 
     shared_libs: [
         "android.hardware.media.omx@1.0",
-        "libandroidicu",
+        "libicu",
         "libfmq",
         "libbinder",
         "libhidlbase",
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 354971a..6f25cec 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -15,6 +15,8 @@
  */
 
 #include <inttypes.h>
+#include <mutex>
+#include <set>
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NdkMediaCodec"
@@ -42,6 +44,7 @@
 
 
 static media_status_t translate_error(status_t err) {
+
     if (err == OK) {
         return AMEDIA_OK;
     } else if (err == -EAGAIN) {
@@ -51,7 +54,18 @@
     } else if (err == DEAD_OBJECT) {
         return AMEDIACODEC_ERROR_RECLAIMED;
     }
-    ALOGE("sf error code: %d", err);
+
+    {
+        // minimize log flooding. Some CTS behavior made this noisy and apps could do the same.
+        static std::set<status_t> untranslated;
+        static std::mutex mutex;
+        std::lock_guard lg(mutex);
+
+        if (untranslated.find(err) == untranslated.end()) {
+            ALOGE("untranslated sf error code: %d", err);
+            untranslated.insert(err);
+        }
+    }
     return AMEDIA_ERROR_UNKNOWN;
 }
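
Note: the block above is a general "log each distinct value once" pattern: a function-local
static set remembers which error codes were already reported, guarded by a mutex because
translate_error() can run on multiple threads. A standalone sketch of the pattern with fprintf
standing in for ALOGE:

    #include <cstdio>
    #include <mutex>
    #include <set>

    // Log a message for each distinct code only the first time it is seen.
    // The static set and mutex live for the lifetime of the process.
    inline void logCodeOnce(int code) {
        static std::set<int> seen;
        static std::mutex mutex;
        std::lock_guard<std::mutex> lg(mutex);
        if (seen.insert(code).second) {  // insert() reports whether the code was new
            std::fprintf(stderr, "unhandled code: %d\n", code);
        }
    }
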
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 18cf0c1..64c7923 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -305,6 +305,7 @@
     {
         return !devices().isEmpty() ? devices().itemAt(0)->hasGainController() : false;
     }
+    bool isRouted() const { return mPatchHandle != AUDIO_PATCH_HANDLE_NONE; }
 
     DeviceVector mDevices; /**< current devices this output is routed to */
     wp<AudioPolicyMix> mPolicyMix;  // non NULL when used by a dynamic policy
@@ -437,6 +438,8 @@
 
     uint32_t getRecommendedMuteDurationMs() const override;
 
+    void setTracksInvalidatedStatusByStrategy(product_strategy_t strategy);
+
     const sp<IOProfile> mProfile;          // I/O profile this output derives from
     audio_io_handle_t mIoHandle;           // output handle
     uint32_t mLatency;                  //
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index dc2403c..0431619 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -56,7 +56,13 @@
 
     virtual void dump(String8 *dst, int spaces) const;
     virtual std::string toShortString() const;
-
+    /**
+     * @brief isInternal
+     * @return true if the client corresponds to an audio patch created from createAudioPatch API or
+     * for call audio routing, or false if the client corresponds to an AudioTrack, AudioRecord or
+     * HW Audio Source.
+     */
+    virtual bool isInternal() const { return false; }
     audio_port_handle_t portId() const { return mPortId; }
     uid_t uid() const { return mUid; }
     audio_session_t session() const { return mSessionId; };
@@ -69,8 +75,16 @@
     bool isPreferredDeviceForExclusiveUse() const { return mPreferredDeviceForExclusiveUse; }
     virtual void setActive(bool active) { mActive = active; }
     bool active() const { return mActive; }
+    /**
+     * @brief hasPreferredDevice Internal clients use a preferred device only for convenience, so
+     * this behavior is hidden here to avoid regressions (such as invalidating the tracks of
+     * clients that follow the same strategy).
+     * @param activeOnly if true, only report a preferred device for an active client
+     * @return true if this (non-internal) client has a preferred device set
+     */
     bool hasPreferredDevice(bool activeOnly = false) const {
-        return mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
+        return !isInternal() &&
+                mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
     }
 
 private:
@@ -143,6 +157,14 @@
     }
     uint32_t getActivityCount() const { return mActivityCount; }
 
+    bool isInvalid() const {
+        return mIsInvalid;
+    }
+
+    void setIsInvalid() {
+        mIsInvalid = true;
+    }
+
 private:
     const audio_stream_type_t mStream;
     const product_strategy_t mStrategy;
@@ -155,6 +177,7 @@
      * involved in a duplication.
      */
     uint32_t mActivityCount = 0;
+    bool mIsInvalid = false;
 };
 
 class RecordClientDescriptor: public ClientDescriptor
@@ -211,6 +234,11 @@
         mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
         mSinkDevice = nullptr;
     }
+    bool belongsToOutput(const sp<SwAudioOutputDescriptor> &swOutput) const {
+        return swOutput != nullptr && mSwOutput.promote() == swOutput;
+    }
+    void setUseSwBridge() { mUseSwBridge = true; }
+    bool useSwBridge() const { return mUseSwBridge; }
     bool isConnected() const { return mPatchHandle != AUDIO_PATCH_HANDLE_NONE; }
     audio_patch_handle_t getPatchHandle() const { return mPatchHandle; }
     sp<DeviceDescriptor> srcDevice() const { return mSrcDevice; }
@@ -229,6 +257,35 @@
     sp<DeviceDescriptor> mSinkDevice;
     wp<SwAudioOutputDescriptor> mSwOutput;
     wp<HwAudioOutputDescriptor> mHwOutput;
+    bool mUseSwBridge = false;
+};
+
+/**
+ * @brief The InternalSourceClientDescriptor class
+ * Specialized client descriptor for either a raw patch created from the @see createAudioPatch API
+ * or for internal audio patches managed by APM (e.g. phone call patches).
+ * Whichever bridge is created (software or hardware), a client is needed to track the activity
+ * and manage volumes.
+ * The audio patch's requested sink is expressed as a preferred device, which allows routing of
+ * the SwOutput. APM then checks the requester's UID against the UID of audioserver to
+ * prevent rerouting a SwOutput involved in raw patches.
+ */
+class InternalSourceClientDescriptor: public SourceClientDescriptor
+{
+public:
+    InternalSourceClientDescriptor(
+            audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
+            const struct audio_port_config &config, const sp<DeviceDescriptor>& srcDevice,
+             const sp<DeviceDescriptor>& sinkDevice,
+            product_strategy_t strategy, VolumeSource volumeSource) :
+        SourceClientDescriptor(
+            portId, uid, attributes, config, srcDevice, AUDIO_STREAM_PATCH, strategy,
+            volumeSource)
+    {
+        setPreferredDeviceId(sinkDevice->getId());
+    }
+    bool isInternal() const override { return true; }
+    ~InternalSourceClientDescriptor() override = default;
 };
 
 class SourceClientCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 5c342a1..009fa82 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -726,6 +726,14 @@
     return mProfile->recommendedMuteDurationMs;
 }
 
+void SwAudioOutputDescriptor::setTracksInvalidatedStatusByStrategy(product_strategy_t strategy) {
+    for (const auto &client : getClientIterable()) {
+        if (strategy == client->strategy()) {
+            client->setIsInvalid();
+        }
+    }
+}
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 1132a29..d1655ef 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -100,7 +100,8 @@
     TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
         {config.sample_rate, config.channel_mask, config.format}, AUDIO_PORT_HANDLE_NONE,
         stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
-        {} /* Sources do not support secondary outputs*/, nullptr), mSrcDevice(srcDevice)
+        {} /* Sources do not support secondary outputs*/, nullptr),
+    mSrcDevice(srcDevice)
 {
 }
 
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
index faf15d6..9f6b703 100644
--- a/services/audiopolicy/fuzzer/Android.bp
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -62,4 +62,7 @@
         "libaudiopolicymanager_interface_headers",
     ],
     data: [":audiopolicyfuzzer_configuration_files"],
+    fuzz_config: {
+       cc: ["mnaganov@google.com"],
+    },
 }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index c3c9753..fcef136 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -664,12 +664,8 @@
     ALOGV("%s device rxDevice %s txDevice %s", __func__,
           rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str());
 
-    disconnectTelephonyRxAudioSource();
-    // release TX patch if any
-    if (mCallTxPatch != 0) {
-        releaseAudioPatchInternal(mCallTxPatch->getHandle());
-        mCallTxPatch.clear();
-    }
+    disconnectTelephonyAudioSource(mCallRxSourceClient);
+    disconnectTelephonyAudioSource(mCallTxSourceClient);
 
     auto telephonyRxModule =
         mHwModules.getModuleForDeviceType(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
@@ -727,7 +723,7 @@
                 closeActiveClients(activeDesc);
             }
         }
-        mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs);
+        connectTelephonyTxAudioSource(txSourceDevice, txSinkDevice, delayMs);
     }
     if (waitMs != nullptr) {
         *waitMs = muteWaitMs;
@@ -735,36 +731,6 @@
     return NO_ERROR;
 }
 
-sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
-        bool isRx, const sp<DeviceDescriptor> &device, uint32_t delayMs) {
-    PatchBuilder patchBuilder;
-
-    if (device == nullptr) {
-        return nullptr;
-    }
-
-    // @TODO: still ignoring the address, or not dealing platform with multiple telephony devices
-    if (isRx) {
-        patchBuilder.addSink(device).
-                addSource(mAvailableInputDevices.getDevice(
-                    AUDIO_DEVICE_IN_TELEPHONY_RX, String8(), AUDIO_FORMAT_DEFAULT));
-    } else {
-        patchBuilder.addSource(device).
-                addSink(mAvailableOutputDevices.getDevice(
-                    AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT));
-    }
-
-    audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
-    status_t status =
-            createAudioPatchInternal(patchBuilder.patch(), &patchHandle, mUidCached, delayMs);
-    ssize_t index = mAudioPatches.indexOfKey(patchHandle);
-    if (status != NO_ERROR || index < 0) {
-        ALOGW("%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
-        return nullptr;
-    }
-    return mAudioPatches.valueAt(index);
-}
-
 bool AudioPolicyManager::isDeviceOfModule(
         const sp<DeviceDescriptor>& devDesc, const char *moduleId) const {
     sp<HwModule> module = mHwModules.getModuleFromName(moduleId);
@@ -779,20 +745,55 @@
 
 void AudioPolicyManager::connectTelephonyRxAudioSource()
 {
-    disconnectTelephonyRxAudioSource();
+    disconnectTelephonyAudioSource(mCallRxSourceClient);
     const struct audio_port_config source = {
         .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE,
         .ext.device.type = AUDIO_DEVICE_IN_TELEPHONY_RX, .ext.device.address = ""
     };
     const auto aa = mEngine->getAttributesForStreamType(AUDIO_STREAM_VOICE_CALL);
-    status_t status = startAudioSource(&source, &aa, &mCallRxSourceClientPort, 0/*uid*/);
-    ALOGE_IF(status != NO_ERROR, "%s failed to start Telephony Rx AudioSource", __func__);
+    mCallRxSourceClient = startAudioSourceInternal(&source, &aa, 0/*uid*/);
+    ALOGE_IF(mCallRxSourceClient == nullptr,
+             "%s failed to start Telephony Rx AudioSource", __func__);
 }
 
-void AudioPolicyManager::disconnectTelephonyRxAudioSource()
+void AudioPolicyManager::disconnectTelephonyAudioSource(sp<SourceClientDescriptor> &clientDesc)
 {
-    stopAudioSource(mCallRxSourceClientPort);
-    mCallRxSourceClientPort = AUDIO_PORT_HANDLE_NONE;
+    if (clientDesc == nullptr) {
+        return;
+    }
+    ALOGW_IF(stopAudioSource(clientDesc->portId()) != NO_ERROR,
+            "%s error stopping audio source", __func__);
+    clientDesc.clear();
+}
+
+void AudioPolicyManager::connectTelephonyTxAudioSource(
+        const sp<DeviceDescriptor> &srcDevice, const sp<DeviceDescriptor> &sinkDevice,
+        uint32_t delayMs)
+{
+    disconnectTelephonyAudioSource(mCallTxSourceClient);
+    if (srcDevice == nullptr || sinkDevice == nullptr) {
+        ALOGW("%s could not create patch, invalid sink and/or source device(s)", __func__);
+        return;
+    }
+    PatchBuilder patchBuilder;
+    patchBuilder.addSource(srcDevice).addSink(sinkDevice);
+    ALOGV("%s between source %s and sink %s", __func__,
+            srcDevice->toString().c_str(), sinkDevice->toString().c_str());
+    auto callTxSourceClientPortId = PolicyAudioPort::getNextUniqueId();
+    const audio_attributes_t aa = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
+    struct audio_port_config source = {};
+    srcDevice->toAudioPortConfig(&source);
+    mCallTxSourceClient = new InternalSourceClientDescriptor(
+                callTxSourceClientPortId, mUidCached, aa, source, srcDevice, sinkDevice,
+                mCommunnicationStrategy, toVolumeSource(aa));
+    audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
+    status_t status = connectAudioSourceToSink(
+                mCallTxSourceClient, sinkDevice, patchBuilder.patch(), patchHandle, mUidCached,
+                delayMs);
+    ALOGE_IF(status != NO_ERROR, "%s() error %d creating TX audio patch", __func__, status);
+    if (status == NO_ERROR) {
+        mAudioSources.add(callTxSourceClientPortId, mCallTxSourceClient);
+    }
 }
 
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
@@ -860,11 +861,8 @@
                 rxDevices = mPrimaryOutput->devices();
             }
             if (oldState == AUDIO_MODE_IN_CALL) {
-                disconnectTelephonyRxAudioSource();
-                if (mCallTxPatch != 0) {
-                    releaseAudioPatchInternal(mCallTxPatch->getHandle());
-                    mCallTxPatch.clear();
-                }
+                disconnectTelephonyAudioSource(mCallRxSourceClient);
+                disconnectTelephonyAudioSource(mCallTxSourceClient);
             }
             setOutputDevices(mPrimaryOutput, rxDevices, force, 0);
         }
@@ -874,8 +872,10 @@
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
         DeviceVector newDevices = getNewOutputDevices(desc, true /*fromCache*/);
-        if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) {
-            setOutputDevices(desc, newDevices, !newDevices.isEmpty(), 0 /*delayMs*/);
+        if (state != AUDIO_MODE_IN_CALL || (desc != mPrimaryOutput && !isTelephonyRxOrTx(desc))) {
+            bool forceRouting = !newDevices.isEmpty();
+            setOutputDevices(desc, newDevices, forceRouting, 0 /*delayMs*/, nullptr,
+                             true /*requiresMuteCheck*/, !forceRouting /*requiresVolumeCheck*/);
         }
     }
 
@@ -1510,6 +1510,27 @@
     return msdPatches;
 }
 
+bool AudioPolicyManager::isMsdPatch(const audio_patch_handle_t &handle) const {
+    ssize_t index = mAudioPatches.indexOfKey(handle);
+    if (index < 0) {
+        return false;
+    }
+    const sp<AudioPatch> patch = mAudioPatches.valueAt(index);
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule == nullptr) {
+        return false;
+    }
+    const struct audio_port_config *sink = &patch->mPatch.sinks[0];
+    if (getMsdAudioOutDevices().contains(mAvailableOutputDevices.getDeviceFromId(sink->id))) {
+        return true;
+    }
+    index = getMsdOutputPatches().indexOfKey(handle);
+    if (index < 0) {
+        return false;
+    }
+    return true;
+}
+
 status_t AudioPolicyManager::getMsdProfiles(bool hwAvSync,
                                             const InputProfileCollection &inputProfiles,
                                             const OutputProfileCollection &outputProfiles,
@@ -1939,8 +1960,7 @@
 
     // force device change if the output is inactive and no audio patch is already present.
     // check active before incrementing usage count
-    bool force = !outputDesc->isActive() &&
-            (outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
+    bool force = !outputDesc->isActive() && !outputDesc->isRouted();
 
     DeviceVector devices;
     sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote();
@@ -3511,11 +3531,15 @@
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
         DeviceVector newDevices = getNewOutputDevices(outputDesc, true /*fromCache*/);
-        if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
+        if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) ||
+                (outputDesc != mPrimaryOutput && !isTelephonyRxOrTx(outputDesc))) {
             // As done in setDeviceConnectionState, we could also fix default device issue by
             // preventing the force re-routing in case of default dev that distinguishes on address.
             // Let's give back to engine full device choice decision however.
-            waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs);
+            bool forceRouting = !newDevices.isEmpty();
+            waitMs = setOutputDevices(outputDesc, newDevices, forceRouting, delayMs, nullptr,
+                                      true /*requiresMuteCheck*/,
+                                      !forceRouting /*requiresVolumeCheck*/);
             // Only apply special touch sound delay once
             delayMs = 0;
         }
@@ -4064,17 +4088,15 @@
     return BAD_VALUE;
 }
 
-status_t AudioPolicyManager::createAudioPatchInternal(const struct audio_patch *patch,
-                                                      audio_patch_handle_t *handle,
-                                                      uid_t uid, uint32_t delayMs,
-                                                      const sp<SourceClientDescriptor>& sourceDesc)
+status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
+                                              audio_patch_handle_t *handle,
+                                              uid_t uid)
 {
     ALOGV("%s", __func__);
     if (handle == NULL || patch == NULL) {
         return BAD_VALUE;
     }
     ALOGV("%s num sources %d num sinks %d", __func__, patch->num_sources, patch->num_sinks);
-
     if (!audio_patch_is_valid(patch)) {
         return BAD_VALUE;
     }
@@ -4082,7 +4104,6 @@
     if (patch->num_sources > 1) {
         return INVALID_OPERATION;
     }
-
     if (patch->sources[0].role != AUDIO_PORT_ROLE_SOURCE) {
         return INVALID_OPERATION;
     }
@@ -4092,6 +4113,86 @@
         }
     }
 
+    sp<DeviceDescriptor> srcDevice = mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
+    sp<DeviceDescriptor> sinkDevice = mAvailableOutputDevices.getDeviceFromId(patch->sinks[0].id);
+    if (srcDevice == nullptr || sinkDevice == nullptr) {
+        ALOGW("%s could not create patch, invalid sink and/or source device(s)", __func__);
+        return BAD_VALUE;
+    }
+    ALOGV("%s between source %s and sink %s", __func__,
+            srcDevice->toString().c_str(), sinkDevice->toString().c_str());
+    audio_port_handle_t portId = PolicyAudioPort::getNextUniqueId();
+    // Default attributes, default volume priority, not to interfere with non-raw audio patches.
+    audio_attributes_t attributes = attributes_initializer(AUDIO_USAGE_MEDIA);
+    const struct audio_port_config *source = &patch->sources[0];
+    sp<SourceClientDescriptor> sourceDesc =
+            new InternalSourceClientDescriptor(
+                portId, uid, attributes, *source, srcDevice, sinkDevice,
+                mEngine->getProductStrategyForAttributes(attributes), toVolumeSource(attributes));
+
+    status_t status =
+            connectAudioSourceToSink(sourceDesc, sinkDevice, patch, *handle, uid, 0 /* delayMs */);
+
+    if (status != NO_ERROR) {
+        return INVALID_OPERATION;
+    }
+    mAudioSources.add(portId, sourceDesc);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::connectAudioSourceToSink(
+        const sp<SourceClientDescriptor>& sourceDesc, const sp<DeviceDescriptor> &sinkDevice,
+        const struct audio_patch *patch,
+        audio_patch_handle_t &handle,
+        uid_t uid, uint32_t delayMs)
+{
+    status_t status = createAudioPatchInternal(patch, &handle, uid, delayMs, sourceDesc);
+    if (status != NO_ERROR || mAudioPatches.indexOfKey(handle) < 0) {
+        ALOGW("%s patch panel could not connect device patch, error %d", __func__, status);
+        return INVALID_OPERATION;
+    }
+    sourceDesc->connect(handle, sinkDevice);
+    if (isMsdPatch(handle)) {
+        return NO_ERROR;
+    }
+    // SW Bridge? (@todo: HW bridge, keep track of HwOutput for device selection "reconsideration")
+    sp<SwAudioOutputDescriptor> swOutput = sourceDesc->swOutput().promote();
+    ALOG_ASSERT(swOutput != nullptr, "%s: a swOutput shall always be associated", __func__);
+    if (swOutput->getClient(sourceDesc->portId()) != nullptr) {
+        ALOGW("%s source portId has already been attached to outputDesc", __func__);
+        goto FailurePatchAdded;
+    }
+    status = swOutput->start();
+    if (status != NO_ERROR) {
+        goto FailureSourceAdded;
+    }
+    swOutput->addClient(sourceDesc);
+    status = startSource(swOutput, sourceDesc, &delayMs);
+    if (status != NO_ERROR) {
+        ALOGW("%s failed to start source, error %d", __FUNCTION__, status);
+        goto FailureSourceActive;
+    }
+    if (delayMs != 0) {
+        usleep(delayMs * 1000);
+    }
+    return NO_ERROR;
+
+FailureSourceActive:
+    swOutput->stop();
+    releaseOutput(sourceDesc->portId());
+FailureSourceAdded:
+    sourceDesc->setSwOutput(nullptr);
+FailurePatchAdded:
+    releaseAudioPatchInternal(handle);
+    return INVALID_OPERATION;
+}
+
+status_t AudioPolicyManager::createAudioPatchInternal(const struct audio_patch *patch,
+                                                      audio_patch_handle_t *handle,
+                                                      uid_t uid, uint32_t delayMs,
+                                                      const sp<SourceClientDescriptor>& sourceDesc)
+{
+    ALOGV("%s num sources %d num sinks %d", __func__, patch->num_sources, patch->num_sinks);
     sp<AudioPatch> patchDesc;
     ssize_t index = mAudioPatches.indexOfKey(*handle);
 
@@ -4280,7 +4381,7 @@
                 // in config XML to reach the sink so that it can be declared as available.
                 audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
                 sp<SwAudioOutputDescriptor> outputDesc = nullptr;
-                if (sourceDesc != nullptr) {
+                if (!sourceDesc->isInternal()) {
                     // take care of dynamic routing for SwOutput selection,
                     audio_attributes_t attributes = sourceDesc->attributes();
                     audio_stream_type_t stream = sourceDesc->stream();
@@ -4308,44 +4409,49 @@
                         return INVALID_OPERATION;
                     }
                     sourceDesc->setSwOutput(outputDesc);
+                } else {
+                    // Same for "raw patches", i.e. patches created through the createAudioPatch API
+                    SortedVector<audio_io_handle_t> outputs =
+                            getOutputsForDevices(DeviceVector(sinkDevice), mOutputs);
+                    // if the sink device is reachable via an opened output stream, request to
+                    // go via this output stream by adding a second source to the patch
+                    // description
+                    output = selectOutput(outputs);
+                    if (output == AUDIO_IO_HANDLE_NONE) {
+                        ALOGE("%s no output available for internal patch sink", __func__);
+                        return INVALID_OPERATION;
+                    }
+                    outputDesc = mOutputs.valueFor(output);
+                    if (outputDesc->isDuplicated()) {
+                        ALOGV("%s output for device %s is duplicated",
+                              __func__, sinkDevice->toString().c_str());
+                        return INVALID_OPERATION;
+                    }
+                    sourceDesc->setSwOutput(outputDesc);
                 }
                 // create a software bridge in PatchPanel if:
                 // - source and sink devices are on different HW modules OR
                 // - audio HAL version is < 3.0
                 // - audio HAL version is >= 3.0 but no route has been declared between devices
-                // - called from startAudioSource (aka sourceDesc != nullptr) and source device does
-                //   not have a gain controller
+                // - called from startAudioSource (aka sourceDesc is not internal) and source device
+                //   does not have a gain controller
                 if (!srcDevice->hasSameHwModuleAs(sinkDevice) ||
                         (srcDevice->getModuleVersionMajor() < 3) ||
                         !srcDevice->getModule()->supportsPatch(srcDevice, sinkDevice) ||
-                        (sourceDesc != nullptr &&
+                        (!sourceDesc->isInternal() &&
                          srcDevice->getAudioPort()->getGains().size() == 0)) {
                     // support only one sink device for now to simplify output selection logic
                     if (patch->num_sinks > 1) {
                         return INVALID_OPERATION;
                     }
-                    if (sourceDesc == nullptr) {
-                        SortedVector<audio_io_handle_t> outputs =
-                                getOutputsForDevices(DeviceVector(sinkDevice), mOutputs);
-                        // if the sink device is reachable via an opened output stream, request to
-                        // go via this output stream by adding a second source to the patch
-                        // description
-                        output = selectOutput(outputs);
-                        if (output != AUDIO_IO_HANDLE_NONE) {
-                            outputDesc = mOutputs.valueFor(output);
-                            if (outputDesc->isDuplicated()) {
-                                ALOGV("%s output for device %s is duplicated",
-                                      __FUNCTION__, sinkDevice->toString().c_str());
-                                return INVALID_OPERATION;
-                            }
-                        }
-                    }
+                    sourceDesc->setUseSwBridge();
                     if (outputDesc != nullptr) {
                         audio_port_config srcMixPortConfig = {};
                         outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
                         // for volume control, we may need a valid stream
-                        srcMixPortConfig.ext.mix.usecase.stream = sourceDesc != nullptr ?
-                                    sourceDesc->stream() : AUDIO_STREAM_PATCH;
+                        srcMixPortConfig.ext.mix.usecase.stream = !sourceDesc->isInternal() ?
+                                    mEngine->getStreamTypeForAttributes(sourceDesc->attributes()) :
+                                    AUDIO_STREAM_PATCH;
                         patchBuilder.addSource(srcMixPortConfig);
                     }
                 }
@@ -4368,11 +4474,9 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle,
-                                                  uid_t uid)
+status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle, uid_t uid)
 {
-    ALOGV("releaseAudioPatch() patch %d", handle);
-
+    ALOGV("%s patch %d", __func__, handle);
     ssize_t index = mAudioPatches.indexOfKey(handle);
 
     if (index < 0) {
@@ -4384,11 +4488,21 @@
     if (patchDesc->getUid() != mUidCached && uid != patchDesc->getUid()) {
         return INVALID_OPERATION;
     }
-    return releaseAudioPatchInternal(handle);
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    for (size_t i = 0; i < mAudioSources.size(); i++) {
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc != nullptr && sourceDesc->getPatchHandle() == handle) {
+            portId = sourceDesc->portId();
+            break;
+        }
+    }
+    return portId != AUDIO_PORT_HANDLE_NONE ?
+                stopAudioSource(portId) : releaseAudioPatchInternal(handle);
 }
 
 status_t AudioPolicyManager::releaseAudioPatchInternal(audio_patch_handle_t handle,
-                                                       uint32_t delayMs)
+                                                       uint32_t delayMs,
+                                                       const sp<SourceClientDescriptor>& sourceDesc)
 {
     ALOGV("%s patch %d", __func__, handle);
     if (mAudioPatches.indexOfKey(handle) < 0) {
@@ -4429,26 +4543,29 @@
             removeAudioPatch(patchDesc->getHandle());
             nextAudioPortGeneration();
             mpClientInterface->onAudioPatchListUpdate();
-            // SW Bridge
+            // SW or HW Bridge
+            sp<SwAudioOutputDescriptor> outputDesc = nullptr;
+            audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
             if (patch->num_sources > 1 && patch->sources[1].type == AUDIO_PORT_TYPE_MIX) {
-                sp<SwAudioOutputDescriptor> outputDesc =
-                        mOutputs.getOutputFromId(patch->sources[1].id);
-                if (outputDesc == NULL) {
-                    ALOGW("%s output not found for id %d", __func__, patch->sources[0].id);
-                    // releaseOutput has already called closeOuput in case of direct output
-                    return NO_ERROR;
-                }
-                if (patchDesc->getHandle() != outputDesc->getPatchHandle()) {
-                    // force SwOutput patch removal as AF counter part patch has already gone.
-                    ALOGV("%s reset patch handle on Output as different from SWBridge", __func__);
-                    removeAudioPatch(outputDesc->getPatchHandle());
-                }
-                outputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
+                outputDesc = mOutputs.getOutputFromId(patch->sources[1].id);
+            } else if (patch->num_sources == 1 && sourceDesc != nullptr) {
+                outputDesc = sourceDesc->swOutput().promote();
+            }
+            if (outputDesc == nullptr) {
+                ALOGW("%s no output for id %d", __func__, patch->sources[0].id);
+                // releaseOutput has already called closeOutput in case of direct output
+                return NO_ERROR;
+            }
+            if (!outputDesc->isActive() && !sourceDesc->useSwBridge()) {
+                resetOutputDevice(outputDesc);
+            } else {
+                // Reuse patch handle if still valid / do not force rerouting if still routed
+                patchHandle = outputDesc->getPatchHandle();
                 setOutputDevices(outputDesc,
                                  getNewOutputDevices(outputDesc, true /*fromCache*/),
-                                 true, /*force*/
+                                 patchHandle == AUDIO_PATCH_HANDLE_NONE, /*force*/
                                  0,
-                                 NULL);
+                                 patchHandle == AUDIO_PATCH_HANDLE_NONE ? nullptr : &patchHandle);
             }
         } else {
             return BAD_VALUE;
@@ -4694,6 +4811,18 @@
     return status;
 }
 
+sp<SourceClientDescriptor> AudioPolicyManager::startAudioSourceInternal(
+        const struct audio_port_config *source, const audio_attributes_t *attributes, uid_t uid)
+{
+    ALOGV("%s", __FUNCTION__);
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+
+    status_t status = startAudioSource(source, attributes, &portId, uid);
+    ALOGE_IF(status != OK, "%s: failed to start audio source (%d)", __func__, status);
+    return mAudioSources.valueFor(portId);
+}
+
+
 status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
     ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
@@ -4718,52 +4847,9 @@
     PatchBuilder patchBuilder;
     patchBuilder.addSink(sinkDevice).addSource(srcDevice);
     audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
-    status_t status =
-            createAudioPatchInternal(patchBuilder.patch(), &handle, mUidCached, 0, sourceDesc);
-    if (status != NO_ERROR || mAudioPatches.indexOfKey(handle) < 0) {
-        ALOGW("%s patch panel could not connect device patch, error %d", __func__, status);
-        return INVALID_OPERATION;
-    }
-    sourceDesc->connect(handle, sinkDevice);
-    // SW Bridge? (@todo: HW bridge, keep track of HwOutput for device selection "reconsideration")
-    sp<SwAudioOutputDescriptor> swOutput = sourceDesc->swOutput().promote();
-    if (swOutput != 0) {
-        status = swOutput->start();
-        if (status != NO_ERROR) {
-            goto FailureSourceAdded;
-        }
-        if (swOutput->getClient(sourceDesc->portId()) != nullptr) {
-            ALOGW("%s source portId has already been attached to outputDesc", __func__);
-            goto FailureReleasePatch;
-        }
-        swOutput->addClient(sourceDesc);
-        uint32_t delayMs = 0;
-        status = startSource(swOutput, sourceDesc, &delayMs);
-        if (status != NO_ERROR) {
-            ALOGW("%s failed to start source, error %d", __FUNCTION__, status);
-            goto FailureSourceActive;
-        }
-        if (delayMs != 0) {
-            usleep(delayMs * 1000);
-        }
-    } else {
-        sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->hwOutput().promote();
-        if (hwOutputDesc != 0) {
-          //   create Hwoutput and add to mHwOutputs
-        } else {
-            ALOGW("%s source has neither SW nor HW output", __FUNCTION__);
-        }
-    }
-    return NO_ERROR;
 
-FailureSourceActive:
-    swOutput->stop();
-    releaseOutput(sourceDesc->portId());
-FailureSourceAdded:
-    sourceDesc->setSwOutput(nullptr);
-FailureReleasePatch:
-    releaseAudioPatchInternal(handle);
-    return INVALID_OPERATION;
+    return connectAudioSourceToSink(
+                sourceDesc, sinkDevice, patchBuilder.patch(), handle, mUidCached, 0 /*delayMs*/);
 }
 
 status_t AudioPolicyManager::stopAudioSource(audio_port_handle_t portId)
@@ -5089,7 +5175,7 @@
             ALOGW("%s source has neither SW nor HW output", __FUNCTION__);
         }
     }
-    status_t status = releaseAudioPatchInternal(sourceDesc->getPatchHandle());
+    status_t status = releaseAudioPatchInternal(sourceDesc->getPatchHandle(), 0, sourceDesc);
     sourceDesc->disconnect();
     return status;
 }
@@ -6061,7 +6147,7 @@
         sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
         if (sourceDesc != nullptr && followsSameRouting(attr, sourceDesc->attributes())
                 && sourceDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE
-                && !isCallRxAudioSource(sourceDesc)) {
+                && !isCallRxAudioSource(sourceDesc) && !sourceDesc->isInternal()) {
             connectAudioSource(sourceDesc);
         }
     }
@@ -6174,7 +6260,7 @@
                                 newDevices.types());
             }
             sp<SourceClientDescriptor> source = getSourceForAttributesOnOutput(srcOut, attr);
-            if (source != nullptr && !isCallRxAudioSource(source)) {
+            if (source != nullptr && !isCallRxAudioSource(source) && !source->isInternal()) {
                 connectAudioSource(source);
             }
         }
@@ -6188,6 +6274,12 @@
             for (auto stream :  mEngine->getStreamTypesForProductStrategy(psId)) {
                 mpClientInterface->invalidateStream(stream);
             }
+            for (audio_io_handle_t srcOut : srcOutputs) {
+                sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
+                if (desc == nullptr) continue;
+
+                desc->setTracksInvalidatedStatusByStrategy(psId);
+            }
         }
     }
 }
@@ -6708,6 +6800,8 @@
         muteWaitMs = 0;
     }
 
+    bool outputRouted = outputDesc->isRouted();
+
     // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
     // output profile or if new device is not supported AND previous device(s) is(are) still
     // available (otherwise reset device must be done on the output)
@@ -6724,8 +6818,7 @@
     //  AND force is not specified
     //  AND the output is connected by a valid audio patch.
     // Doing this check here allows the caller to call setOutputDevices() without conditions
-    if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) &&
-            !force && outputDesc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
+    if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) && !force && outputRouted) {
         ALOGV("%s setting same device %s or null device, force=%d, patch handle=%d", __func__,
               filteredDevices.toString().c_str(), force, outputDesc->getPatchHandle());
         if (requiresVolumeCheck && !filteredDevices.isEmpty()) {
@@ -6765,6 +6858,9 @@
                                                audio_patch_handle_t *patchHandle)
 {
     ssize_t index;
+    if (patchHandle == nullptr && !outputDesc->isRouted()) {
+        return INVALID_OPERATION;
+    }
     if (patchHandle) {
         index = mAudioPatches.indexOfKey(*patchHandle);
     } else {
@@ -7509,7 +7605,10 @@
         routedDevices.add(device);
     }
     for (const auto& client : activeClients) {
-        // TODO: b/175343099 only travel the valid client
+        if (client->isInvalid()) {
+            // No need to take care about invalidated clients.
+            continue;
+        }
         sp<DeviceDescriptor> preferredDevice =
                 mAvailableOutputDevices.getDeviceFromId(client->preferredDeviceId());
         if (mEngine->getOutputDevicesForAttributes(
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 4d307cf..6f8b897 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -263,10 +263,7 @@
         virtual status_t getAudioPort(struct audio_port_v7 *port);
         virtual status_t createAudioPatch(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle,
-                                           uid_t uid) {
-            return createAudioPatchInternal(patch, handle, uid);
-        }
-
+                                           uid_t uid);
         virtual status_t releaseAudioPatch(audio_patch_handle_t handle,
                                               uid_t uid);
         virtual status_t listAudioPatches(unsigned int *num_patches,
@@ -638,13 +635,22 @@
         void updateCallAndOutputRouting(bool forceVolumeReeval = true, uint32_t delayMs = 0);
 
         bool isCallRxAudioSource(const sp<SourceClientDescriptor> &source) {
-            return mCallRxSourceClientPort != AUDIO_PORT_HANDLE_NONE
-                && source == mAudioSources.valueFor(mCallRxSourceClientPort);
+            return mCallRxSourceClient != nullptr && source == mCallRxSourceClient;
         }
 
         void connectTelephonyRxAudioSource();
 
-        void disconnectTelephonyRxAudioSource();
+        void disconnectTelephonyAudioSource(sp<SourceClientDescriptor> &clientDesc);
+
+        void connectTelephonyTxAudioSource(const sp<DeviceDescriptor> &srcDevice,
+                                           const sp<DeviceDescriptor> &sinkDevice,
+                                           uint32_t delayMs);
+
+        bool isTelephonyRxOrTx(const sp<SwAudioOutputDescriptor>& desc) const {
+            return (mCallRxSourceClient != nullptr && mCallRxSourceClient->belongsToOutput(desc))
+                    || (mCallTxSourceClient != nullptr
+                    &&  mCallTxSourceClient->belongsToOutput(desc));
+        }
 
         /**
          * @brief updates routing for all inputs.
@@ -851,6 +857,12 @@
         status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
         status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
 
+        status_t connectAudioSourceToSink(const sp<SourceClientDescriptor>& sourceDesc,
+                                          const sp<DeviceDescriptor> &sinkDevice,
+                                          const struct audio_patch *patch,
+                                          audio_patch_handle_t &handle,
+                                          uid_t uid, uint32_t delayMs);
+
         sp<SourceClientDescriptor> getSourceForAttributesOnOutput(audio_io_handle_t output,
                                                                   const audio_attributes_t &attr);
         void clearAudioSourcesForOutput(audio_io_handle_t output);
@@ -901,8 +913,6 @@
 
         SoundTriggerSessionCollection mSoundTriggerSessions;
 
-        sp<AudioPatch> mCallTxPatch;
-
         HwAudioOutputCollection mHwOutputs;
         SourceClientCollection mAudioSources;
 
@@ -943,7 +953,8 @@
 
-        // The port handle of the hardware audio source created internally for the Call RX audio
-        // end point.
-        audio_port_handle_t mCallRxSourceClientPort = AUDIO_PORT_HANDLE_NONE;
+        // Audio source clients created internally to handle the Call RX and Call TX audio
+        // end points.
+        sp<SourceClientDescriptor> mCallRxSourceClient;
+        sp<SourceClientDescriptor> mCallTxSourceClient;
 
         // Support for Multi-Stream Decoder (MSD) module
         sp<DeviceDescriptor> getMsdAudioInDevice() const;
@@ -975,7 +986,13 @@
         // Called by setDeviceConnectionState()
         status_t deviceToAudioPort(audio_devices_t deviceType, const char* device_address,
                                    const char* device_name, media::AudioPort* aidPort);
+        bool isMsdPatch(const audio_patch_handle_t &handle) const;
+
 private:
+        sp<SourceClientDescriptor> startAudioSourceInternal(
+                const struct audio_port_config *source, const audio_attributes_t *attributes,
+                uid_t uid);
+
         void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
 
         // Add or remove AC3 DTS encodings based on user preferences.
@@ -1120,21 +1137,25 @@
          * @param[out] handle patch handle to be provided if patch installed correctly
          * @param[in] uid of the client
          * @param[in] delayMs if required
-         * @param[in] sourceDesc [optional] in case of external source, source client to be
-         * configured by the patch, i.e. assigning an Output (HW or SW)
+         * @param[in] sourceDesc source client to be configured when creating the patch, i.e.
+         *            assigning an Output (HW or SW) used for volume control.
          * @return NO_ERROR if patch installed correctly, error code otherwise.
          */
         status_t createAudioPatchInternal(const struct audio_patch *patch,
                                           audio_patch_handle_t *handle,
-                                          uid_t uid, uint32_t delayMs = 0,
-                                          const sp<SourceClientDescriptor>& sourceDesc = nullptr);
+                                          uid_t uid, uint32_t delayMs,
+                                          const sp<SourceClientDescriptor>& sourceDesc);
         /**
          * @brief releaseAudioPatchInternal internal function to remove an audio patch
          * @param[in] handle of the patch to be removed
          * @param[in] delayMs if required
+         * @param[in] sourceDesc [optional] in case of external source, source client to be
+         * unrouted from the patch, i.e. providing the Output (HW or SW) it was assigned to
          * @return NO_ERROR if patch removed correctly, error code otherwise.
          */
-        status_t releaseAudioPatchInternal(audio_patch_handle_t handle, uint32_t delayMs = 0);
+        status_t releaseAudioPatchInternal(audio_patch_handle_t handle,
+                                           uint32_t delayMs = 0,
+                                           const sp<SourceClientDescriptor>& sourceDesc = nullptr);
 
         status_t installPatch(const char *caller,
                 audio_patch_handle_t *patchHandle,
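
A minimal caller-side sketch of the reworked createAudioPatch()/releaseAudioPatch() flow above, assuming hypothetical srcDevicePortId, sinkDevicePortId and callerUid values and an AudioPolicyManager pointer apm (none of these names come from the patch):

    // Build a one-source / one-sink device-to-device patch. The manager now wraps
    // it in an InternalSourceClientDescriptor and connects it through
    // connectAudioSourceToSink() instead of going straight to the patch panel.
    struct audio_patch patch = {};
    patch.num_sources = 1;
    patch.num_sinks = 1;
    patch.sources[0].role = AUDIO_PORT_ROLE_SOURCE;
    patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sources[0].id = srcDevicePortId;     // hypothetical source device port id
    patch.sinks[0].role = AUDIO_PORT_ROLE_SINK;
    patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sinks[0].id = sinkDevicePortId;      // hypothetical sink device port id
    audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
    status_t status = apm->createAudioPatch(&patch, &handle, callerUid);
    // Releasing the handle now stops the internally tracked audio source when one
    // exists for this patch, instead of only releasing the patch itself.
    if (status == NO_ERROR) {
        apm->releaseAudioPatch(handle, callerUid);
    }
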
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 3ee2aa3..9955b6b 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -114,6 +114,13 @@
 
 void AudioPolicyService::onFirstRef()
 {
+    // Log an AudioPolicy "constructor" mediametrics event on first ref.
+    // This records the time it takes to load the audio modules and devices.
+    mediametrics::Defer defer([beginNs = systemTime()] {
+        mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_POLICY)
+            .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CTOR)
+            .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
+            .record(); });
     {
         Mutex::Autolock _l(mLock);
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index bcba80e..701206a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -864,7 +864,7 @@
     bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
     bool isMultiResolution = outputConfiguration.isMultiResolution();
     int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
-    int streamUseCase = outputConfiguration.getStreamUseCase();
+    int64_t streamUseCase = outputConfiguration.getStreamUseCase();
     int timestampBase = outputConfiguration.getTimestampBase();
     int mirrorMode = outputConfiguration.getMirrorMode();
 
@@ -1260,7 +1260,7 @@
     }
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
-    int streamUseCase = outputConfiguration.getStreamUseCase();
+    int64_t streamUseCase = outputConfiguration.getStreamUseCase();
     int timestampBase = outputConfiguration.getTimestampBase();
     int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
     int mirrorMode = outputConfiguration.getMirrorMode();
@@ -1629,7 +1629,7 @@
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
     int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
-    int streamUseCase= outputConfiguration.getStreamUseCase();
+    int64_t streamUseCase = outputConfiguration.getStreamUseCase();
     int timestampBase = outputConfiguration.getTimestampBase();
     int mirrorMode = outputConfiguration.getMirrorMode();
     for (auto& bufferProducer : bufferProducers) {
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 10fa33f..9303fd2 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -287,7 +287,7 @@
     }
 }
 
-status_t CameraOfflineSessionClient::notifyActive() {
+status_t CameraOfflineSessionClient::notifyActive(float maxPreviewFps __unused) {
     return startCameraStreamingOps();
 }
 
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index ef1d2de..f2c42d8 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -94,7 +94,7 @@
     // NotificationListener API
     void notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras) override;
     void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) override;
-    status_t notifyActive() override;
+    status_t notifyActive(float maxPreviewFps) override;
     void notifyIdle(int64_t requestCount, int64_t resultErrorCount, bool deviceError,
             const std::vector<hardware::CameraStreamStats>& streamStats) override;
     void notifyAutoFocus(uint8_t newState, int triggerId) override;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index a29f3a6..6ed3c02 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -315,7 +315,7 @@
 }
 
 template <typename TClientBase>
-status_t Camera2ClientBase<TClientBase>::notifyActive() {
+status_t Camera2ClientBase<TClientBase>::notifyActive(float maxPreviewFps) {
     if (!mDeviceActive) {
         status_t res = TClientBase::startCameraStreamingOps();
         if (res != OK) {
@@ -323,7 +323,7 @@
                     TClientBase::mCameraIdStr.string(), res);
             return res;
         }
-        CameraServiceProxyWrapper::logActive(TClientBase::mCameraIdStr);
+        CameraServiceProxyWrapper::logActive(TClientBase::mCameraIdStr, maxPreviewFps);
     }
     mDeviceActive = true;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 182e6ef..6b90f5e 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -73,7 +73,8 @@
 
     virtual void          notifyError(int32_t errorCode,
                                       const CaptureResultExtras& resultExtras);
-    virtual status_t      notifyActive();  // Returns errors on app ops permission failures
+    // Returns errors on app ops permission failures
+    virtual status_t      notifyActive(float maxPreviewFps);
     virtual void          notifyIdle(int64_t requestCount, int64_t resultErrorCount,
                                      bool deviceError,
                                      const std::vector<hardware::CameraStreamStats>& streamStats);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 5883988..05edd6a 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -184,7 +184,7 @@
             bool isShared = false, bool isMultiResolution = false,
             uint64_t consumerUsage = 0,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) = 0;
 
@@ -205,7 +205,7 @@
             bool isShared = false, bool isMultiResolution = false,
             uint64_t consumerUsage = 0,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) = 0;
 
diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
index 54e42a6..f39b92a 100644
--- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
+++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
@@ -40,7 +40,8 @@
     // Required for API 1 and 2
     virtual void notifyError(int32_t errorCode,
                              const CaptureResultExtras &resultExtras) = 0;
-    virtual status_t notifyActive() = 0; // May return an error since it checks appops
+    // May return an error since it checks appops
+    virtual status_t notifyActive(float maxPreviewFps) = 0;
     virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError,
             const std::vector<hardware::CameraStreamStats>& streamStats) = 0;
 
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index c337eda..4cc03f0 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -642,7 +642,7 @@
         removeRef(DeviceMode::CAMERA, id);
         ALOGE("%s: Transaction error opening a session for camera device %s: %s",
                 __FUNCTION__, id.c_str(), ret.getMessage());
-        return DEAD_OBJECT;
+        return AidlProviderInfo::mapToStatusT(ret);
     }
     return OK;
 }
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index b2a7fee..6f35e56 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2021 The Android Open Source Project
+ * Copyright (C) 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -45,10 +45,32 @@
 using ICameraProvider = aidl::android::hardware::camera::provider::ICameraProvider;
 using StatusListener = CameraProviderManager::StatusListener;
 
+static status_t mapExceptionCodeToStatusT(binder_exception_t binderException) {
+    switch (binderException) {
+        case EX_NONE:
+            return OK;
+        case EX_ILLEGAL_ARGUMENT:
+        case EX_NULL_POINTER:
+        case EX_BAD_PARCELABLE:
+        case EX_ILLEGAL_STATE:
+            return BAD_VALUE;
+        case EX_UNSUPPORTED_OPERATION:
+            return INVALID_OPERATION;
+        case EX_TRANSACTION_FAILED:
+            return DEAD_OBJECT;
+        default:
+            return UNKNOWN_ERROR;
+    }
+}
+
 status_t AidlProviderInfo::mapToStatusT(const ndk::ScopedAStatus& s) {
     using Status = aidl::android::hardware::camera::common::Status;
+    auto exceptionCode = s.getExceptionCode();
+    if (exceptionCode != EX_SERVICE_SPECIFIC) {
+        return mapExceptionCodeToStatusT(exceptionCode);
+    }
     Status st = static_cast<Status>(s.getServiceSpecificError());
-    switch(st) {
+    switch (st) {
         case Status::OK:
             return OK;
         case Status::ILLEGAL_ARGUMENT:
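
A brief hedged illustration of the exception-first mapping introduced above, limited to the cases visible in this hunk and using the standard ndk::ScopedAStatus factory helpers:

    // Transport failures still surface as DEAD_OBJECT, while other binder
    // exceptions now map to more specific errors via mapExceptionCodeToStatusT(),
    // instead of CameraProviderManager::openSession() returning DEAD_OBJECT
    // unconditionally on any failed AIDL call.
    status_t transportErr = AidlProviderInfo::mapToStatusT(
            ndk::ScopedAStatus::fromExceptionCode(EX_TRANSACTION_FAILED));    // DEAD_OBJECT
    status_t argErr = AidlProviderInfo::mapToStatusT(
            ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT));      // BAD_VALUE
    status_t unsupportedErr = AidlProviderInfo::mapToStatusT(
            ndk::ScopedAStatus::fromExceptionCode(EX_UNSUPPORTED_OPERATION)); // INVALID_OPERATION
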
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 688b6df..aeffd24 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -978,7 +978,7 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
-            uint64_t consumerUsage, int64_t dynamicRangeProfile, int streamUseCase,
+            uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
             int timestampBase, int mirrorMode) {
     ATRACE_CALL();
 
@@ -1013,8 +1013,8 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
         const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
         std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
-        uint64_t consumerUsage, int64_t dynamicRangeProfile, int streamUseCase, int timestampBase,
-        int mirrorMode) {
+        uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
+        int timestampBase, int mirrorMode) {
     ATRACE_CALL();
 
     Mutex::Autolock il(mInterfaceLock);
@@ -1022,7 +1022,8 @@
     Mutex::Autolock l(mLock);
     ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
             " consumer usage %" PRIu64 ", isShared %d, physicalCameraId %s, isMultiResolution %d"
-            " dynamicRangeProfile %" PRIx64 ", streamUseCase %d, timestampBase %d, mirrorMode %d",
+            " dynamicRangeProfile 0x%" PRIx64 ", streamUseCase %" PRId64 ", timestampBase %d,"
+            " mirrorMode %d",
             mId.string(), mNextStreamId, width, height, format, dataSpace, rotation,
             consumerUsage, isShared, physicalCameraId.string(), isMultiResolution,
             dynamicRangeProfile, streamUseCase, timestampBase, mirrorMode);
@@ -1787,6 +1788,20 @@
     return OK;
 }
 
+float Camera3Device::getMaxPreviewFps(sp<camera3::Camera3OutputStreamInterface> stream) {
+    camera_metadata_entry minDurations =
+            mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+    for (size_t i = 0; i < minDurations.count; i += 4) {
+        if (minDurations.data.i64[i] == stream->getFormat()
+                && minDurations.data.i64[i+1] == stream->getWidth()
+                && minDurations.data.i64[i+2] == stream->getHeight()) {
+            int64_t minFrameDuration = minDurations.data.i64[i+3];
+            return 1e9f / minFrameDuration;
+        }
+    }
+    return 0.0f;
+}
+
 /**
  * Methods called by subclasses
  */
@@ -1795,6 +1810,7 @@
     ATRACE_CALL();
     std::vector<int> streamIds;
     std::vector<hardware::CameraStreamStats> streamStats;
+    float sessionMaxPreviewFps = 0.0f;
 
     {
         // Need mLock to safely update state and synchronize to current
@@ -1814,21 +1830,25 @@
         // state changes
         if (mPauseStateNotify) return;
 
-        // Populate stream statistics in case of Idle
-        if (idle) {
-            for (size_t i = 0; i < mOutputStreams.size(); i++) {
-                auto stream = mOutputStreams[i];
-                if (stream.get() == nullptr) continue;
+        for (size_t i = 0; i < mOutputStreams.size(); i++) {
+            auto stream = mOutputStreams[i];
+            if (stream.get() == nullptr) continue;
+
+            float streamMaxPreviewFps = getMaxPreviewFps(stream);
+            sessionMaxPreviewFps = std::max(sessionMaxPreviewFps, streamMaxPreviewFps);
+
+            // Populate stream statistics in case of Idle
+            if (idle) {
                 streamIds.push_back(stream->getId());
                 Camera3Stream* camera3Stream = Camera3Stream::cast(stream->asHalStream());
                 int64_t usage = 0LL;
-                int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+                int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
                 if (camera3Stream != nullptr) {
                     usage = camera3Stream->getUsage();
                     streamUseCase = camera3Stream->getStreamUseCase();
                 }
                 streamStats.emplace_back(stream->getWidth(), stream->getHeight(),
-                    stream->getFormat(), stream->getDataSpace(), usage,
+                    stream->getFormat(), streamMaxPreviewFps, stream->getDataSpace(), usage,
                     stream->getMaxHalBuffers(),
                     stream->getMaxTotalBuffers() - stream->getMaxHalBuffers(),
                     stream->getDynamicRangeProfile(), streamUseCase);
@@ -1869,7 +1889,7 @@
             }
             listener->notifyIdle(requestCount, resultErrorCount, deviceError, streamStats);
         } else {
-            res = listener->notifyActive();
+            res = listener->notifyActive(sessionMaxPreviewFps);
         }
     }
     if (res != OK) {
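
A short worked example of the frame-duration arithmetic behind getMaxPreviewFps() above; the duration value is illustrative, not taken from the patch:

    // ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS holds (format, width, height,
    // minFrameDurationNs) entries. For a stream whose matching entry is
    // 33333333 ns, the per-stream value is 1e9 / 33333333 ~= 30 fps; the session
    // value passed to notifyActive() is the maximum over all configured output
    // streams.
    constexpr int64_t kMinFrameDurationNs = 33333333;              // illustrative
    const float streamMaxPreviewFps = 1e9f / kMinFrameDurationNs;  // ~30.0f
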
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 453ac3a..c1ba88a 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -141,7 +141,7 @@
             uint64_t consumerUsage = 0,
             int64_t dynamicRangeProfile =
             ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) override;
 
@@ -156,7 +156,7 @@
             uint64_t consumerUsage = 0,
             int64_t dynamicRangeProfile =
             ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) override;
 
@@ -294,6 +294,8 @@
     status_t disconnectImpl();
     static status_t removeFwkOnlyRegionKeys(CameraMetadata *request);
 
+    float getMaxPreviewFps(sp<camera3::Camera3OutputStreamInterface> stream);
+
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 2497c22..b5d0746 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -34,7 +34,7 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution, int64_t dynamicRangeProfile, int streamUseCase,
+        int setId, bool isMultiResolution, int64_t dynamicRangeProfile, int64_t streamUseCase,
         bool deviceTimeBaseIsRealtime, int timestampBase) :
         Camera3Stream(id, type,
                 width, height, maxSize, format, dataSpace, rotation,
@@ -91,7 +91,7 @@
     }
     lines.appendFormat("      Dynamic Range Profile: 0x%" PRIx64,
             camera_stream::dynamic_range_profile);
-    lines.appendFormat("      Stream use case: %d\n", camera_stream::use_case);
+    lines.appendFormat("      Stream use case: %" PRId64 "\n", camera_stream::use_case);
     lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
             mFrameCount, mLastTimestamp);
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index e757ec6..f389d53 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -39,7 +39,7 @@
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 8ae16e5..0f61065 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -47,7 +47,7 @@
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-        int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
         int mirrorMode) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
@@ -80,7 +80,7 @@
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-        int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
         int mirrorMode) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
@@ -118,7 +118,7 @@
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-        int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
         int mirrorMode) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
@@ -163,7 +163,7 @@
                                          const std::unordered_set<int32_t> &sensorPixelModesUsed,
                                          uint64_t consumerUsage, nsecs_t timestampOffset,
                                          int setId, bool isMultiResolution,
-                                         int64_t dynamicRangeProfile, int streamUseCase,
+                                         int64_t dynamicRangeProfile, int64_t streamUseCase,
                                          bool deviceTimeBaseIsRealtime, int timestampBase,
                                          int mirrorMode) :
         Camera3IOStreamBase(id, type, width, height,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index e777e85..7b12efc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -91,7 +91,7 @@
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
@@ -108,7 +108,7 @@
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
@@ -124,7 +124,7 @@
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
@@ -256,7 +256,7 @@
             uint64_t consumerUsage = 0, nsecs_t timestampOffset = 0,
             int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 198e32f..c09a0b2 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -34,7 +34,7 @@
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool useHalBufManager, int64_t dynamicProfile,
-        int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
         int mirrorMode) :
         Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 9be0c86..8f7f00b 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -42,7 +42,7 @@
             int setId = CAMERA3_STREAM_SET_ID_INVALID,
             bool useHalBufManager = false,
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
-            int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+            int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
             int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 396b316..7ad6649 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -55,7 +55,7 @@
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-        int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase) :
+        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase) :
     camera_stream(),
     mId(id),
     mSetId(setId),
@@ -179,7 +179,7 @@
     return camera_stream::max_buffers;
 }
 
-int Camera3Stream::getStreamUseCase() const {
+int64_t Camera3Stream::getStreamUseCase() const {
     return camera_stream::use_case;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index d1545cc..d429e6c 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -178,7 +178,7 @@
     android_dataspace getOriginalDataSpace() const;
     int               getMaxHalBuffers() const;
     const String8&    physicalCameraId() const;
-    int               getStreamUseCase() const;
+    int64_t           getStreamUseCase() const;
     int               getTimestampBase() const;
     bool              isDeviceTimeBaseRealtime() const;
 
@@ -509,7 +509,7 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-            int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase);
+            int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase);
 
     wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
 
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 77c6483..5c333a4 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -66,7 +66,7 @@
 
     std::unordered_set<int32_t> sensor_pixel_modes_used;
     int64_t dynamic_range_profile;
-    int use_case;
+    int64_t use_case;
 } camera_stream_t;
 
 typedef struct camera_stream_buffer {
@@ -111,7 +111,7 @@
         bool supportsOffline = false;
         std::unordered_set<int32_t> sensorPixelModesUsed;
         int64_t dynamicRangeProfile;
-        int streamUseCase;
+        int64_t streamUseCase;
         int timestampBase;
         int mirrorMode;
         OutputStreamInfo() :
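
The camera_stream_t and OutputStreamInfo hunks keep use_case/streamUseCase the same width as the dynamic-range-profile field beside them. A standalone sketch of the widened descriptor, using an illustrative struct rather than the AOSP definition:

    #include <cstdint>
    #include <unordered_set>

    // Illustrative stream descriptor: both HAL-defined 64-bit enums round-trip
    // through the struct without truncation once use_case is widened.
    typedef struct example_camera_stream {
        std::unordered_set<int32_t> sensor_pixel_modes_used;
        int64_t dynamic_range_profile;
        int64_t use_case;   // previously int
    } example_camera_stream_t;

    int main() {
        example_camera_stream_t stream{};
        stream.use_case = INT64_C(1) << 32;  // a value an int field could not hold
        return stream.use_case > INT32_MAX ? 0 : 1;
    }
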
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 4894ba9..87d3ee8 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -972,7 +972,7 @@
         }
         if (src->use_case != ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT &&
                 mHidlSession_3_8 == nullptr) {
-            ALOGE("%s: Camera device doesn't support non-default stream use case %d!",
+            ALOGE("%s: Camera device doesn't support non-default stream use case %" PRId64 "!",
                     __FUNCTION__, src->use_case);
             return BAD_VALUE;
         }
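
The HidlCamera3Device.cpp hunk also fixes the log format: passing an int64_t through a %d conversion is undefined behavior, so the format string switches to PRId64 once use_case becomes 64-bit. A standalone sketch of the same pattern using plain printf instead of ALOGE:

    #include <cinttypes>
    #include <cstdio>

    int main() {
        int64_t useCase = INT64_C(4);  // placeholder for src->use_case
        // PRId64 expands to the correct conversion specifier for int64_t.
        std::printf("Camera device doesn't support non-default stream use case %" PRId64 "!\n",
                    useCase);
        return 0;
    }
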
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 8699543..82d58e0 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -66,10 +66,11 @@
     }
 }
 
-void CameraServiceProxyWrapper::CameraSessionStatsWrapper::onActive() {
+void CameraServiceProxyWrapper::CameraSessionStatsWrapper::onActive(float maxPreviewFps) {
     Mutex::Autolock l(mLock);
 
     mSessionStats.mNewCameraState = CameraSessionStats::CAMERA_STATE_ACTIVE;
+    mSessionStats.mMaxPreviewFps = maxPreviewFps;
     updateProxyDeviceState(mSessionStats);
 
     // Reset mCreationDuration to -1 to distinguish between 1st session
@@ -158,7 +159,7 @@
     sessionStats->onStreamConfigured(operatingMode, internalConfig, latencyMs);
 }
 
-void CameraServiceProxyWrapper::logActive(const String8& id) {
+void CameraServiceProxyWrapper::logActive(const String8& id, float maxPreviewFps) {
     std::shared_ptr<CameraSessionStatsWrapper> sessionStats;
     {
         Mutex::Autolock l(mLock);
@@ -171,7 +172,7 @@
     }
 
     ALOGV("%s: id %s", __FUNCTION__, id.c_str());
-    sessionStats->onActive();
+    sessionStats->onActive(maxPreviewFps);
 }
 
 void CameraServiceProxyWrapper::logIdle(const String8& id,
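
In CameraServiceProxyWrapper, the new float rides along with the ACTIVE transition: logActive() forwards maxPreviewFps to onActive(), which stores it in the session stats before the proxy update. A minimal standalone sketch of that flow, with simplified types that are not the AOSP wrapper:

    #include <mutex>

    struct SessionStats {
        int   newCameraState = 0;   // stands in for the CameraSessionStats state enum
        float maxPreviewFps  = 0.0f;
    };

    class SessionStatsWrapper {
      public:
        void onActive(float maxPreviewFps) {
            std::lock_guard<std::mutex> lock(mLock);
            mStats.newCameraState = 2;              // hypothetical ACTIVE value
            mStats.maxPreviewFps  = maxPreviewFps;  // recorded alongside the state change
            // The real code then calls updateProxyDeviceState(mSessionStats).
        }

      private:
        std::mutex   mLock;
        SessionStats mStats;
    };

    int main() {
        SessionStatsWrapper wrapper;
        wrapper.onActive(30.0f);  // e.g. the session's maximum preview fps
        return 0;
    }
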
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index f701e94..037316d 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -48,7 +48,7 @@
         void onOpen();
         void onClose(int32_t latencyMs);
         void onStreamConfigured(int operatingMode, bool internalReconfig, int32_t latencyMs);
-        void onActive();
+        void onActive(float maxPreviewFps);
         void onIdle(int64_t requestCount, int64_t resultErrorCount, bool deviceError,
                 const std::vector<hardware::CameraStreamStats>& streamStats);
     };
@@ -81,7 +81,7 @@
             int32_t latencyMs);
 
     // Session state becomes active
-    static void logActive(const String8& id);
+    static void logActive(const String8& id, float maxPreviewFps);
 
     // Session state becomes idle
     static void logIdle(const String8& id,
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index af00e81..4090dae 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -313,7 +313,7 @@
     }
 }
 
-bool isStreamUseCaseSupported(int streamUseCase,
+bool isStreamUseCaseSupported(int64_t streamUseCase,
         const CameraMetadata &deviceInfo) {
     camera_metadata_ro_entry_t availableStreamUseCases =
             deviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES);
@@ -324,7 +324,7 @@
     }
 
     for (size_t i = 0; i < availableStreamUseCases.count; i++) {
-        if (availableStreamUseCases.data.i32[i] == streamUseCase) {
+        if (availableStreamUseCases.data.i64[i] == streamUseCase) {
             return true;
         }
     }
@@ -336,7 +336,7 @@
         sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
         const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
         const std::vector<int32_t> &sensorPixelModesUsed, int64_t dynamicRangeProfile,
-        int streamUseCase, int timestampBase, int mirrorMode) {
+        int64_t streamUseCase, int timestampBase, int mirrorMode) {
     // bufferProducer must be non-null
     if (gbp == nullptr) {
         String8 msg = String8::format("Camera %s: Surface is NULL", logicalCameraId.string());
@@ -452,7 +452,7 @@
     }
     if (!SessionConfigurationUtils::isStreamUseCaseSupported(streamUseCase,
             physicalCameraMetadata)) {
-        String8 msg = String8::format("Camera %s: stream use case %d not supported,"
+        String8 msg = String8::format("Camera %s: stream use case %" PRId64 " not supported,"
                 " failed to create output stream", logicalCameraId.string(), streamUseCase);
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
@@ -718,7 +718,7 @@
             return res;
         }
 
-        int streamUseCase = it.getStreamUseCase();
+        int64_t streamUseCase = it.getStreamUseCase();
         int timestampBase = it.getTimestampBase();
         int mirrorMode = it.getMirrorMode();
         if (deferredConsumer) {
@@ -1040,7 +1040,7 @@
             // image
             return false;
         }
-        if (static_cast<int32_t>(streamConfigV38.streams[i].useCase) !=
+        if (static_cast<int64_t>(streamConfigV38.streams[i].useCase) !=
                 ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {
             // ICameraDevice older than 3.8 doesn't support stream use case
             return false;
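
With streamUseCase now 64-bit, isStreamUseCaseSupported() reads the static metadata entry through data.i64 rather than data.i32, so the comparison matches the element width of ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES. A standalone sketch of the membership check, assuming the available use cases arrive as a 64-bit array:

    #include <cstdint>
    #include <vector>

    // Returns true if useCase appears in the list advertised by the device
    // (the real code iterates camera_metadata data.i64 instead of a vector).
    bool isUseCaseSupported(int64_t useCase, const std::vector<int64_t>& available) {
        for (int64_t candidate : available) {
            if (candidate == useCase) {
                return true;
            }
        }
        return false;
    }

    int main() {
        std::vector<int64_t> available = {0, 1, 2};       // placeholder values
        return isUseCaseSupported(INT64_C(1), available) ? 0 : 1;
    }
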
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 406510f..8abcc95 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -99,7 +99,7 @@
         sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
         const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
         const std::vector<int32_t> &sensorPixelModesUsed,  int64_t dynamicRangeProfile,
-        int streamUseCase, int timestampBase, int mirrorMode);
+        int64_t streamUseCase, int timestampBase, int mirrorMode);
 
 void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
         camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
@@ -114,7 +114,7 @@
 // Check if the device supports a given dynamicRangeProfile
 bool isDynamicRangeProfileSupported(int64_t dynamicRangeProfile, const CameraMetadata& staticMeta);
 
-bool isStreamUseCaseSupported(int streamUseCase, const CameraMetadata &deviceInfo);
+bool isStreamUseCaseSupported(int64_t streamUseCase, const CameraMetadata &deviceInfo);
 
 void mapStreamInfo(const OutputStreamInfo &streamInfo,
         camera3::camera_stream_rotation_t rotation, String8 physicalId,