Merge "Clear MultiAccessUnit SkipCutBuffer" into main am: 6f94492234 am: 3599ac65d2

Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/2953974

Change-Id: I367bc5ad150da837ccd6c01011198f69e6528ec0
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/aidl/android/media/audio/IHalAdapterVendorExtension.aidl b/aidl/android/media/audio/IHalAdapterVendorExtension.aidl
index b7a7678..48fb291 100644
--- a/aidl/android/media/audio/IHalAdapterVendorExtension.aidl
+++ b/aidl/android/media/audio/IHalAdapterVendorExtension.aidl
@@ -23,6 +23,8 @@
  * is optional. Vendors may provide an implementation on the system_ext
  * partition. The default instance of this interface, if provided, must be
  * registered prior to the moment when the audio server connects to HAL modules.
+ * Vendors need to set the system property `ro.audio.ihaladaptervendorextension_enabled`
+ * to `true` for the framework to bind to this service.
  *
  * {@hide}
  */
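
The added documentation ties discovery of this interface to a read-only system property. As a minimal sketch of the framework-side gate it describes (the helper name is illustrative and assumes libbase's property API; this is not the actual framework code), the check could look like:

    #include <android-base/properties.h>

    // Hypothetical helper mirroring the documented requirement: only look up
    // IHalAdapterVendorExtension when the vendor has opted in via the property.
    bool shouldBindVendorExtension() {
        return android::base::GetBoolProperty(
                "ro.audio.ihaladaptervendorextension_enabled", /*default_value=*/false);
    }
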
diff --git a/camera/Android.bp b/camera/Android.bp
index 22f1633..4c5b160 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -46,6 +46,7 @@
 aconfig_declarations {
     name: "camera_platform_flags",
     package: "com.android.internal.camera.flags",
+    container: "system",
     srcs: ["camera_platform.aconfig"],
 }
 
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 2e808d1..424923a 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -880,7 +880,7 @@
     return OK;
 }
 
-metadata_vendor_id_t CameraMetadata::getVendorId() {
+metadata_vendor_id_t CameraMetadata::getVendorId() const {
     return get_camera_metadata_vendor_id(mBuffer);
 }
 
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index fb26f83..c12a1a1 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -466,7 +466,7 @@
 
 int VendorTagDescriptorCache::getTagType(uint32_t tag,
         metadata_vendor_id_t id) const {
-    int ret = 0;
+    int ret = -1;
     auto desc = mVendorMap.find(id);
     if (desc != mVendorMap.end()) {
         ret = desc->second->getTagType(tag);
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 0eeeb7f..4bea896 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -278,14 +278,28 @@
     CameraMetadataNative createDefaultRequest(@utf8InCpp String cameraId, int templateId);
 
     /**
-      * Check whether a particular session configuration with optional session parameters
-      * has camera device support.
-      *
-      * @param cameraId The camera id to query session configuration on
-      * @param sessionConfiguration Specific session configuration to be verified.
-      * @return true  - in case the stream combination is supported.
-      *         false - in case there is no device support.
-      */
+     * Check whether a particular session configuration with optional session parameters
+     * has camera device support.
+     *
+     * @param cameraId The camera id to query session configuration for
+     * @param sessionConfiguration Specific session configuration to be verified.
+     * @return true  - in case the stream combination is supported.
+     *         false - in case there is no device support.
+     */
     boolean isSessionConfigurationWithParametersSupported(@utf8InCpp String cameraId,
             in SessionConfiguration sessionConfiguration);
+
+    /**
+     * Get the camera characteristics for a particular session configuration for
+     * the given camera device.
+     *
+     * @param cameraId ID of the device for which the session characteristics must be fetched.
+     * @param sessionConfiguration session configuration for which the characteristics
+     * must be fetched.
+     * @return - characteristics associated with the given session.
+     */
+    CameraMetadataNative getSessionCharacteristics(@utf8InCpp String cameraId,
+                int targetSdkVersion,
+                boolean overrideToPortrait,
+                in SessionConfiguration sessionConfiguration);
 }
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 843e0d4..8e1fcc0 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -99,15 +99,6 @@
       */
     boolean isSessionConfigurationSupported(in SessionConfiguration sessionConfiguration);
 
-    /**
-     * Get the camera characteristics for a particular session configuration
-     *
-     * @param sessionConfiguration Specific session configuration for which the characteristics
-     * are fetched.
-     * @return - characteristics associated with the given session.
-     */
-    CameraMetadataNative getSessionCharacteristics(in SessionConfiguration sessionConfiguration);
-
     void deleteStream(int streamId);
 
     /**
diff --git a/camera/camera_platform.aconfig b/camera/camera_platform.aconfig
index 5d2a263..c3da4a9 100644
--- a/camera/camera_platform.aconfig
+++ b/camera/camera_platform.aconfig
@@ -1,4 +1,5 @@
 package: "com.android.internal.camera.flags"
+container: "system"
 
 flag {
      namespace: "camera_platform"
@@ -76,3 +77,45 @@
      description: "Enable creating MultiResolutionImageReader with usage flag configuration"
      bug: "301588215"
 }
+
+flag {
+     namespace: "camera_platform"
+     name: "use_ro_board_api_level_for_vndk_version"
+     description: "Enable using ro.board.api_level instead of ro.vndk.version to get VNDK version"
+     bug: "312315580"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "camera_extensions_characteristics_get"
+     description: "Enable get extension specific camera characteristics API"
+     bug: "280649914"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "delay_lazy_hal_instantiation"
+     description: "Only trigger lazy HAL instantiation when the HAL is needed for an operation."
+     bug: "319735068"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "return_buffers_outside_locks"
+     description: "Enable returning graphics buffers to buffer queues without holding the in-flight mutex"
+     bug: "315526878"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "camera_device_setup"
+     description: "Create an intermediate Camera Device class for limited CameraDevice access."
+     bug: "320741775"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "camera_privacy_allowlist"
+     description: "Allowlisting to exempt safety-relevant cameras from privacy control for automotive devices"
+     bug: "282814430"
+}
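
These aconfig flags are consumed from native code through the generated flag library. A minimal sketch, assuming the standard aconfig C++ codegen (header and namespace derived from the package "com.android.internal.camera.flags"; the surrounding helper is illustrative):

    #include <com_android_internal_camera_flags.h>

    namespace flags = com::android::internal::camera::flags;

    // Gate the new lazy-HAL behaviour on its flag; callers fall back to eager
    // instantiation when the flag is disabled.
    bool shouldDelayHalInstantiation() {
        return flags::delay_lazy_hal_instantiation();
    }
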
diff --git a/camera/include/camera/CameraMetadata.h b/camera/include/camera/CameraMetadata.h
index c56ee6d..2903dfb 100644
--- a/camera/include/camera/CameraMetadata.h
+++ b/camera/include/camera/CameraMetadata.h
@@ -245,7 +245,7 @@
     /**
      * Return the current vendor tag id associated with this metadata.
      */
-    metadata_vendor_id_t getVendorId();
+    metadata_vendor_id_t getVendorId() const;
 
   private:
     camera_metadata_t *mBuffer;
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index d4dd546..165395a 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -154,8 +154,8 @@
         "libcamera_metadata",
         "libmediandk",
         "android.frameworks.cameraservice.common-V1-ndk",
-        "android.frameworks.cameraservice.device-V1-ndk",
-        "android.frameworks.cameraservice.service-V1-ndk",
+        "android.frameworks.cameraservice.device-V2-ndk",
+        "android.frameworks.cameraservice.service-V2-ndk",
     ],
     static_libs: [
         "android.hardware.camera.common@1.0-helper",
@@ -188,7 +188,6 @@
     ],
     static_libs: [
         "android.hardware.camera.common@1.0-helper",
-        "android.hidl.token@1.0",
     ],
     cflags: [
         "-D__ANDROID_VNDK__",
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
index 4387cc6..92de1e4 100644
--- a/camera/ndk/NdkCameraCaptureSession.cpp
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -213,7 +213,7 @@
 EXPORT
 camera_status_t ACameraCaptureSession_prepareWindow(
         ACameraCaptureSession* session,
-        ACameraWindowType *window) {
+        ANativeWindow *window) {
     ATRACE_CALL();
     if (session == nullptr || window == nullptr) {
         ALOGE("%s: Error: session %p / window %p is null", __FUNCTION__, session, window);
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 8211671..f2ec573 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -124,7 +124,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionOutput_create(
-        ACameraWindowType* window, /*out*/ACaptureSessionOutput** out) {
+        ANativeWindow* window, /*out*/ACaptureSessionOutput** out) {
     ATRACE_CALL();
     if (window == nullptr || out == nullptr) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
@@ -137,7 +137,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionSharedOutput_create(
-        ACameraWindowType* window, /*out*/ACaptureSessionOutput** out) {
+        ANativeWindow* window, /*out*/ACaptureSessionOutput** out) {
     ATRACE_CALL();
     if (window == nullptr || out == nullptr) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
@@ -150,7 +150,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionPhysicalOutput_create(
-        ACameraWindowType* window, const char* physicalId,
+        ANativeWindow* window, const char* physicalId,
         /*out*/ACaptureSessionOutput** out) {
     ATRACE_CALL();
     if (window == nullptr || physicalId == nullptr || out == nullptr) {
@@ -164,7 +164,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *out,
-        ACameraWindowType* window) {
+        ANativeWindow* window) {
     ATRACE_CALL();
     if ((window == nullptr) || (out == nullptr)) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
@@ -190,7 +190,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *out,
-        ACameraWindowType* window) {
+        ANativeWindow* window) {
     ATRACE_CALL();
     if ((window == nullptr) || (out == nullptr)) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index 87de4a9..b851a1d 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -27,7 +27,7 @@
 
 EXPORT
 camera_status_t ACameraOutputTarget_create(
-        ACameraWindowType* window, ACameraOutputTarget** out) {
+        ANativeWindow* window, ACameraOutputTarget** out) {
     ATRACE_CALL();
     if (window == nullptr) {
         ALOGE("%s: Error: input window is null", __FUNCTION__);
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
index 73439c7..449c0b4 100644
--- a/camera/ndk/impl/ACameraCaptureSession.cpp
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -146,7 +146,7 @@
     return ret;
 }
 
-camera_status_t ACameraCaptureSession::prepare(ACameraWindowType* window) {
+camera_status_t ACameraCaptureSession::prepare(ANativeWindow* window) {
 #ifdef __ANDROID_VNDK__
     std::shared_ptr<acam::CameraDevice> dev = getDevicePtr();
 #else
diff --git a/camera/ndk/impl/ACameraCaptureSession.h b/camera/ndk/impl/ACameraCaptureSession.h
index 88135ba..0d7a2c1 100644
--- a/camera/ndk/impl/ACameraCaptureSession.h
+++ b/camera/ndk/impl/ACameraCaptureSession.h
@@ -23,14 +23,14 @@
 
 #ifdef __ANDROID_VNDK__
 #include "ndk_vendor/impl/ACameraDevice.h"
-#include "ndk_vendor/impl/ACameraCaptureSessionVendor.h"
 #else
 #include "ACameraDevice.h"
+#endif
 
 using namespace android;
 
 struct ACaptureSessionOutput {
-    explicit ACaptureSessionOutput(ACameraWindowType* window, bool isShared = false,
+    explicit ACaptureSessionOutput(ANativeWindow* window, bool isShared = false,
             const char* physicalCameraId = "") :
             mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
 
@@ -47,28 +47,27 @@
         return mWindow > other.mWindow;
     }
 
-    inline bool isWindowEqual(ACameraWindowType* window) const {
+    inline bool isWindowEqual(ANativeWindow* window) const {
         return mWindow == window;
     }
 
     // returns true if the window was successfully added, false otherwise.
-    inline bool addSharedWindow(ACameraWindowType* window) {
+    inline bool addSharedWindow(ANativeWindow* window) {
         auto ret = mSharedWindows.insert(window);
         return ret.second;
     }
 
     // returns the number of elements removed.
-    inline size_t removeSharedWindow(ACameraWindowType* window) {
+    inline size_t removeSharedWindow(ANativeWindow* window) {
         return mSharedWindows.erase(window);
     }
 
-    ACameraWindowType* mWindow;
-    std::set<ACameraWindowType *> mSharedWindows;
+    ANativeWindow* mWindow;
+    std::set<ANativeWindow*> mSharedWindows;
     bool           mIsShared;
     int            mRotation = CAMERA3_STREAM_ROTATION_0;
     std::string mPhysicalCameraId;
 };
-#endif
 
 struct ACaptureSessionOutputContainer {
     std::set<ACaptureSessionOutput> mOutputs;
@@ -147,7 +146,7 @@
         mPreparedCb.context = context;
         mPreparedCb.onWindowPrepared = cb;
     }
-    camera_status_t prepare(ACameraWindowType *window);
+    camera_status_t prepare(ANativeWindow *window);
 
     ACameraDevice* getDevice();
 
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 97d65b0..1fa71f4 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -341,7 +341,7 @@
     return ACAMERA_OK;
 }
 
-camera_status_t CameraDevice::prepareLocked(ACameraWindowType *window) {
+camera_status_t CameraDevice::prepareLocked(ANativeWindow *window) {
     camera_status_t ret = checkCameraClosedOrErrorLocked();
     if (ret != ACAMERA_OK) {
         return ret;
@@ -1097,7 +1097,7 @@
                     if (onWindowPrepared == nullptr) {
                         return;
                     }
-                    ACameraWindowType* anw;
+                    ANativeWindow* anw;
                     found = msg->findPointer(kAnwKey, (void**) &anw);
                     if (!found) {
                         ALOGE("%s: Cannot find ANativeWindow: %d!", __FUNCTION__, __LINE__);
@@ -1823,7 +1823,7 @@
         return ret;
     }
     // We've found the window corresponding to the surface id.
-    ACameraWindowType *window = it->second.first;
+    ANativeWindow *window = it->second.first;
     sp<AMessage> msg = new AMessage(kWhatPreparedCb, dev->mHandler);
     msg->setPointer(kContextKey, session->mPreparedCb.context);
     msg->setPointer(kAnwKey, window);
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 4658d18..2b9f327 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -151,7 +151,7 @@
 
     camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
 
-    camera_status_t prepareLocked(ACameraWindowType *window);
+    camera_status_t prepareLocked(ANativeWindow *window);
 
     camera_status_t allocateCaptureRequest(
             const ACaptureRequest* request, sp<CaptureRequest>& outReq);
diff --git a/camera/ndk/impl/ACaptureRequest.h b/camera/ndk/impl/ACaptureRequest.h
index 2ffcafe..118c2a5 100644
--- a/camera/ndk/impl/ACaptureRequest.h
+++ b/camera/ndk/impl/ACaptureRequest.h
@@ -22,11 +22,8 @@
 
 using namespace android;
 
-#ifdef __ANDROID_VNDK__
-#include "ndk_vendor/impl/ACaptureRequestVendor.h"
-#else
 struct ACameraOutputTarget {
-    explicit ACameraOutputTarget(ACameraWindowType* window) : mWindow(window) {};
+    explicit ACameraOutputTarget(ANativeWindow* window) : mWindow(window) {};
 
     bool operator == (const ACameraOutputTarget& other) const {
         return mWindow == other.mWindow;
@@ -41,9 +38,8 @@
         return mWindow > other.mWindow;
     }
 
-    ACameraWindowType* mWindow;
+    ANativeWindow* mWindow;
 };
-#endif
 
 struct ACameraOutputTargets {
     std::set<ACameraOutputTarget> mOutputs;
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 099c5c5..cf6b970 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -124,7 +124,7 @@
  */
 typedef void (*ACameraCaptureSession_prepareCallback)(
         void *context,
-        ACameraWindowType *window,
+        ANativeWindow *window,
         ACameraCaptureSession *session);
 
 /// Enum for describing error reason in {@link ACameraCaptureFailure}
@@ -276,7 +276,7 @@
  */
 typedef void (*ACameraCaptureSession_captureCallback_bufferLost)(
         void* context, ACameraCaptureSession* session,
-        ACaptureRequest* request, ACameraWindowType* window, int64_t frameNumber);
+        ACaptureRequest* request, ANativeWindow* window, int64_t frameNumber);
 
 /**
  * ACaptureCaptureSession_captureCallbacks structure used in
@@ -1088,7 +1088,7 @@
  * and no pre-allocation is done.</p>
  *
  * @param session the {@link ACameraCaptureSession} that needs to prepare output buffers.
- * @param window the {@link ACameraWindowType} for which the output buffers need to be prepared.
+ * @param window the {@link ANativeWindow} for which the output buffers need to be prepared.
  *
  * @return <ul><li>
  *             {@link ACAMERA_OK} if the method succeeds</li>
@@ -1102,7 +1102,7 @@
  */
 camera_status_t ACameraCaptureSession_prepareWindow(
     ACameraCaptureSession* session,
-    ACameraWindowType *window) __INTRODUCED_IN(34);
+    ANativeWindow *window) __INTRODUCED_IN(34);
 __END_DECLS
 
 #endif /* _NDK_CAMERA_CAPTURE_SESSION_H */
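
With ACameraWindowType now an alias for ANativeWindow in both the NDK and VNDK builds, the window obtained from an AImageReader can be passed straight into these entry points. A minimal sketch (assuming the session was already configured with this window; the helper name is illustrative):

    #include <camera/NdkCameraCaptureSession.h>
    #include <media/NdkImageReader.h>

    // Pre-allocate output buffers for an AImageReader-backed stream.
    camera_status_t prepareReaderWindow(ACameraCaptureSession* session, AImageReader* reader) {
        ANativeWindow* window = nullptr;
        if (AImageReader_getWindow(reader, &window) != AMEDIA_OK || window == nullptr) {
            return ACAMERA_ERROR_UNKNOWN;
        }
        // The window is owned by the reader; it must already be one of the
        // session's configured outputs for prepareWindow to succeed.
        return ACameraCaptureSession_prepareWindow(session, window);
    }
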
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index de10eb3..fbd0ee1 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -364,7 +364,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionOutput_create(
-        ACameraWindowType* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(24);
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(24);
 
 /**
  * Free a ACaptureSessionOutput object.
@@ -705,7 +705,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionSharedOutput_create(
-        ACameraWindowType* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(28);
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(28);
 
 /**
  * Add a native window to shared ACaptureSessionOutput.
@@ -723,7 +723,7 @@
  *             ACaptureSessionOutput.</li></ul>
  */
 camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *output,
-        ACameraWindowType *anw) __INTRODUCED_IN(28);
+        ANativeWindow *anw) __INTRODUCED_IN(28);
 
 /**
  * Remove a native window from shared ACaptureSessionOutput.
@@ -739,7 +739,7 @@
  *             ACaptureSessionOutput.</li></ul>
  */
 camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *output,
-        ACameraWindowType* anw) __INTRODUCED_IN(28);
+        ANativeWindow* anw) __INTRODUCED_IN(28);
 
 /**
  * Create a new camera capture session similar to {@link ACameraDevice_createCaptureSession}. This
@@ -797,7 +797,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionPhysicalOutput_create(
-        ACameraWindowType* anw, const char* physicalId,
+        ANativeWindow* anw, const char* physicalId,
         /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(29);
 
 /**
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2c68cef..1ed17a3 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -76,6 +76,7 @@
     ACAMERA_AUTOMOTIVE_LENS,
     ACAMERA_EXTENSION,
     ACAMERA_JPEGR,
+    ACAMERA_EFV,
     ACAMERA_SECTION_COUNT,
 
     ACAMERA_VENDOR = 0x8000
@@ -123,6 +124,7 @@
     ACAMERA_AUTOMOTIVE_LENS_START  = ACAMERA_AUTOMOTIVE_LENS   << 16,
     ACAMERA_EXTENSION_START        = ACAMERA_EXTENSION         << 16,
     ACAMERA_JPEGR_START            = ACAMERA_JPEGR             << 16,
+    ACAMERA_EFV_START              = ACAMERA_EFV               << 16,
     ACAMERA_VENDOR_START           = ACAMERA_VENDOR            << 16
 } acamera_metadata_section_start_t;
 
@@ -4705,18 +4707,21 @@
      * </ul>
      * <p>should be interpreted in the effective after raw crop field-of-view coordinate system.
      * In this coordinate system,
-     * {preCorrectionActiveArraySize.left, preCorrectionActiveArraySize.top} corresponds to the
+     * {ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.left,
+     *  ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.top} corresponds to
      * the top left corner of the cropped RAW frame and
-     * {preCorrectionActiveArraySize.right, preCorrectionActiveArraySize.bottom} corresponds to
+     * {ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.right,
+     *  ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.bottom} corresponds to
      * the bottom right corner. Client applications must use the values of the keys
      * in the CaptureResult metadata if present.</p>
-     * <p>Crop regions (android.scaler.CropRegion), AE/AWB/AF regions and face coordinates still
+     * <p>Crop regions ACAMERA_SCALER_CROP_REGION, AE/AWB/AF regions and face coordinates still
      * use the ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE coordinate system as usual.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
      * @see ACAMERA_LENS_POSE_ROTATION
      * @see ACAMERA_LENS_POSE_TRANSLATION
+     * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
@@ -11524,6 +11529,7 @@
 
 
 
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_TAGS_H */
diff --git a/camera/ndk/include/camera/NdkCameraWindowType.h b/camera/ndk/include/camera/NdkCameraWindowType.h
index 0838fba..2217528 100644
--- a/camera/ndk/include/camera/NdkCameraWindowType.h
+++ b/camera/ndk/include/camera/NdkCameraWindowType.h
@@ -41,14 +41,11 @@
  * camera2 NDK. This enables us to share the api definition headers and avoid
  * code duplication (since the VNDK variant doesn't use ANativeWindow unlike the
  * NDK variant).
+ * @deprecated No longer needed. Both NDK and VNDK use ANativeWindow now.
+ *             Use ANativeWindow directly.
  */
-#ifdef __ANDROID_VNDK__
-#include <cutils/native_handle.h>
-typedef const native_handle_t ACameraWindowType;
-#else
 #include <android/native_window.h>
 typedef ANativeWindow ACameraWindowType;
-#endif
 
 /** @} */
 
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index dc18544..5ccb510 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -99,7 +99,7 @@
  *
  * @see ACaptureRequest_addTarget
  */
-camera_status_t ACameraOutputTarget_create(ACameraWindowType* window,
+camera_status_t ACameraOutputTarget_create(ANativeWindow* window,
         ACameraOutputTarget** output) __INTRODUCED_IN(24);
 
 /**
diff --git a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h b/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
deleted file mode 100644
index 45098c3..0000000
--- a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "utils.h"
-
-#include <android/binder_auto_utils.h>
-#include <string>
-#include <set>
-
-using ::android::acam::utils::native_handle_ptr_wrapper;
-
-struct ACaptureSessionOutput {
-    explicit ACaptureSessionOutput(const native_handle_t* window, bool isShared = false,
-            const char* physicalCameraId = "") :
-            mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
-
-    bool operator == (const ACaptureSessionOutput& other) const {
-        return (mWindow == other.mWindow);
-    }
-
-    bool operator != (const ACaptureSessionOutput& other) const {
-        return mWindow != other.mWindow;
-    }
-
-    bool operator < (const ACaptureSessionOutput& other) const {
-        return mWindow < other.mWindow;
-    }
-
-    bool operator > (const ACaptureSessionOutput& other) const {
-        return mWindow > other.mWindow;
-    }
-
-    inline bool isWindowEqual(ACameraWindowType* window) const {
-        return mWindow == native_handle_ptr_wrapper(window);
-    }
-
-    // returns true if the window was successfully added, false otherwise.
-    inline bool addSharedWindow(ACameraWindowType* window) {
-        auto ret = mSharedWindows.insert(window);
-        return ret.second;
-    }
-
-    // returns the number of elements removed.
-    inline size_t removeSharedWindow(ACameraWindowType* window) {
-        return mSharedWindows.erase(window);
-    }
-
-    native_handle_ptr_wrapper mWindow;
-    std::set<native_handle_ptr_wrapper> mSharedWindows;
-    bool           mIsShared;
-    int            mRotation = CAMERA3_STREAM_ROTATION_0;
-    std::string mPhysicalCameraId;
-};
-
-
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index 87102e4..3325da6 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -26,7 +26,7 @@
 #include <aidl/android/frameworks/cameraservice/device/CameraMetadata.h>
 #include <aidl/android/frameworks/cameraservice/device/OutputConfiguration.h>
 #include <aidl/android/frameworks/cameraservice/device/SessionConfiguration.h>
-#include <aidlcommonsupport/NativeHandle.h>
+#include <android/native_window_aidl.h>
 #include <inttypes.h>
 #include <map>
 #include <utility>
@@ -59,6 +59,7 @@
 using AidlCameraMetadata = ::aidl::android::frameworks::cameraservice::device::CameraMetadata;
 using ::aidl::android::frameworks::cameraservice::device::OutputConfiguration;
 using ::aidl::android::frameworks::cameraservice::device::SessionConfiguration;
+using ::aidl::android::view::Surface;
 using ::ndk::ScopedAStatus;
 
 // Static member definitions
@@ -231,8 +232,9 @@
         OutputConfiguration& outputStream = sessionConfig.outputStreams[index];
         outputStream.rotation = utils::convertToAidl(output.mRotation);
         outputStream.windowGroupId = -1;
-        outputStream.windowHandles.resize(output.mSharedWindows.size() + 1);
-        outputStream.windowHandles[0] = std::move(dupToAidl(output.mWindow));
+        auto& surfaces = outputStream.surfaces;
+        surfaces.reserve(output.mSharedWindows.size() + 1);
+        surfaces.emplace_back(output.mWindow);
         outputStream.physicalCameraId = output.mPhysicalCameraId;
         index++;
     }
@@ -298,12 +300,12 @@
 
     OutputConfiguration outConfig;
     outConfig.rotation = utils::convertToAidl(output->mRotation);
-    outConfig.windowHandles.resize(output->mSharedWindows.size() + 1);
-    outConfig.windowHandles[0] = std::move(dupToAidl(output->mWindow));
+    auto& surfaces = outConfig.surfaces;
+    surfaces.reserve(output->mSharedWindows.size() + 1);
+    surfaces.emplace_back(output->mWindow);
     outConfig.physicalCameraId = output->mPhysicalCameraId;
-    int i = 1;
     for (auto& anw : output->mSharedWindows) {
-        outConfig.windowHandles[i++] = std::move(dupToAidl(anw));
+        surfaces.emplace_back(anw);
     }
 
     auto remoteRet = mRemote->updateOutputConfiguration(streamId,
@@ -340,7 +342,7 @@
     return ACAMERA_OK;
 }
 
-camera_status_t CameraDevice::prepareLocked(ACameraWindowType *window) {
+camera_status_t CameraDevice::prepareLocked(ANativeWindow *window) {
     camera_status_t ret = checkCameraClosedOrErrorLocked();
     if (ret != ACAMERA_OK) {
         return ret;
@@ -387,18 +389,19 @@
     std::vector<int32_t> requestSurfaceIdxList;
 
     for (auto& outputTarget : request->targets->mOutputs) {
-        native_handle_ptr_wrapper anw = outputTarget.mWindow;
+        ANativeWindow *anw = outputTarget.mWindow;
         bool found = false;
         req->mSurfaceList.push_back(anw);
         // lookup stream/surface ID
         for (const auto& kvPair : mConfiguredOutputs) {
             int streamId = kvPair.first;
             const OutputConfiguration& outConfig = kvPair.second.second;
-            const auto& windowHandles = outConfig.windowHandles;
-            for (int surfaceId = 0; surfaceId < (int) windowHandles.size(); surfaceId++) {
+            const auto& surfaces = outConfig.surfaces;
+            for (int surfaceId = 0; surfaceId < (int) surfaces.size(); surfaceId++) {
                 // If two window handles point to the same native window,
                 // they have the same surfaces.
-                if (utils::isWindowNativeHandleEqual(anw, windowHandles[surfaceId])) {
+                auto& surface = surfaces[surfaceId];
+                if (anw == surface.get()) {
                     found = true;
                     requestStreamIdxList.push_back(streamId);
                     requestSurfaceIdxList.push_back(surfaceId);
@@ -410,7 +413,7 @@
             }
         }
         if (!found) {
-            ALOGE("Unconfigured output target %p in capture request!", anw.mWindow);
+            ALOGE("Unconfigured output target %p in capture request!", anw);
             return ACAMERA_ERROR_INVALID_PARAMETER;
         }
     }
@@ -470,7 +473,7 @@
     }
     pRequest->targets = new ACameraOutputTargets();
     for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
-        native_handle_ptr_wrapper anw = req->mSurfaceList[i];
+        ANativeWindow *anw = req->mSurfaceList[i];
         ACameraOutputTarget outputTarget(anw);
         pRequest->targets->mOutputs.insert(std::move(outputTarget));
     }
@@ -637,20 +640,21 @@
         return ret;
     }
 
-    std::map<native_handle_ptr_wrapper, OutputConfiguration> handleToConfig;
+    std::map<ANativeWindow *, OutputConfiguration> windowToConfig;
     for (const auto& outConfig : outputs->mOutputs) {
-        native_handle_ptr_wrapper anw = outConfig.mWindow;
+        ANativeWindow *anw = outConfig.mWindow;
         OutputConfiguration outConfigInsert;
         outConfigInsert.rotation = utils::convertToAidl(outConfig.mRotation);
         outConfigInsert.windowGroupId = -1;
-        outConfigInsert.windowHandles.resize(outConfig.mSharedWindows.size() + 1);
-        outConfigInsert.windowHandles[0] = std::move(dupToAidl(anw));
+        auto& surfaces = outConfigInsert.surfaces;
+        surfaces.reserve(outConfig.mSharedWindows.size() + 1);
+        surfaces.emplace_back(anw);
         outConfigInsert.physicalCameraId = outConfig.mPhysicalCameraId;
-        handleToConfig.insert({anw, std::move(outConfigInsert)});
+        windowToConfig.insert({anw, std::move(outConfigInsert)});
     }
 
-    std::set<native_handle_ptr_wrapper> addSet;
-    for (auto& kvPair : handleToConfig) {
+    std::set<ANativeWindow *> addSet;
+    for (auto& kvPair : windowToConfig) {
         addSet.insert(kvPair.first);
     }
 
@@ -663,8 +667,8 @@
         auto& anw = outputPair.first;
         auto& configuredOutput = outputPair.second;
 
-        auto itr = handleToConfig.find(anw);
-        if (itr != handleToConfig.end() && (itr->second) == configuredOutput) {
+        auto itr = windowToConfig.find(anw);
+        if (itr != windowToConfig.end() && (itr->second) == configuredOutput) {
             deleteList.push_back(streamId);
         } else {
             addSet.erase(anw);
@@ -714,13 +718,13 @@
     // add new streams
     for (const auto &anw : addSet) {
         int32_t streamId;
-        auto itr = handleToConfig.find(anw);
+        auto itr = windowToConfig.find(anw);
         remoteRet = mRemote->createStream(itr->second, &streamId);
         CHECK_TRANSACTION_AND_RET(remoteRet, "createStream()")
         mConfiguredOutputs.insert(std::make_pair(streamId,
                                                  std::make_pair(anw,
                                                                 std::move(itr->second))));
-        handleToConfig.erase(itr);
+        windowToConfig.erase(itr);
     }
 
     AidlCameraMetadata aidlParams;
@@ -867,9 +871,9 @@
         // Get the surfaces corresponding to the error stream id, go through
         // them and try to match the surfaces in the corresponding
         // CaptureRequest.
-        const auto& errorWindowHandles =
-                outputPairIt->second.second.windowHandles;
-        for (const auto& errorWindowHandle : errorWindowHandles) {
+        const auto& errorSurfaces =
+                outputPairIt->second.second.surfaces;
+        for (const auto& errorSurface : errorSurfaces) {
             for (const auto &requestStreamAndWindowId :
                         request->mCaptureRequest.streamAndWindowIds) {
                 // Go through the surfaces in the capture request and see which
@@ -884,12 +888,11 @@
                     return;
                 }
 
-                const auto &requestWindowHandles =
-                        requestSurfacePairIt->second.second.windowHandles;
+                const auto &requestSurfaces = requestSurfacePairIt->second.second.surfaces;
+                auto& requestSurface = requestSurfaces[requestWindowId];
 
-                if (requestWindowHandles[requestWindowId] == errorWindowHandle) {
-                    const native_handle_t* anw = makeFromAidl(
-                            requestWindowHandles[requestWindowId]);
+                if (requestSurface == errorSurface) {
+                    const ANativeWindow *anw = requestSurface.get();
                     ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
                             getId(), anw, frameNumber);
 
@@ -1085,7 +1088,7 @@
                     if (onWindowPrepared == nullptr) {
                         return;
                     }
-                    native_handle_t* anw;
+                    ANativeWindow* anw;
                     found = msg->findPointer(kAnwKey, (void**) &anw);
                     if (!found) {
                         ALOGE("%s: Cannot find ANativeWindow: %d!", __FUNCTION__, __LINE__);
@@ -1342,10 +1345,10 @@
                         return;
                     }
 
-                    native_handle_t* anw;
+                    ANativeWindow* anw;
                     found = msg->findPointer(kAnwKey, (void**) &anw);
                     if (!found) {
-                        ALOGE("%s: Cannot find native_handle_t!", __FUNCTION__);
+                        ALOGE("%s: Cannot find ANativeWindow!", __FUNCTION__);
                         return;
                     }
 
@@ -1359,7 +1362,6 @@
                     ACaptureRequest* request = allocateACaptureRequest(requestSp, id_cstr);
                     (*onBufferLost)(context, session.get(), request, anw, frameNumber);
                     freeACaptureRequest(request);
-                    native_handle_delete(anw); // clean up anw as it was copied from AIDL
                     break;
                 }
             }
@@ -1842,7 +1844,7 @@
         return ScopedAStatus::ok();
     }
     // We've found the window corresponding to the surface id.
-    const native_handle_t *anw = it->second.first.mWindow;
+    const ANativeWindow *anw = it->second.first;
     sp<AMessage> msg = new AMessage(kWhatPreparedCb, dev->mHandler);
     msg->setPointer(kContextKey, session->mPreparedCb.context);
     msg->setPointer(kAnwKey, (void *)anw);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 6e0c772..b771d47 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -66,7 +66,6 @@
 using ::aidl::android::frameworks::cameraservice::service::CameraStatusAndId;
 using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
 using ::android::AidlMessageQueue;
-using ::android::acam::utils::native_handle_ptr_wrapper;
 
 
 using ResultMetadataQueue = AidlMessageQueue<int8_t, SynchronizedReadWrite>;
@@ -197,7 +196,7 @@
 
     camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
 
-    camera_status_t prepareLocked(ACameraWindowType *window);
+    camera_status_t prepareLocked(ANativeWindow *window);
 
     // Since this writes to ICameraDeviceUser's fmq, clients must take care that:
     //   a) This function is called serially.
@@ -236,7 +235,7 @@
 
     // stream id -> pair of (ACameraWindowType* from application, OutputConfiguration used for
     // camera service)
-    std::map<int, std::pair<native_handle_ptr_wrapper, OutputConfiguration>> mConfiguredOutputs;
+    std::map<int, std::pair<ANativeWindow *, OutputConfiguration>> mConfiguredOutputs;
 
     // TODO: maybe a bool will suffice for synchronous implementation?
     std::atomic_bool mClosing;
diff --git a/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h b/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
deleted file mode 100644
index fcb7e34..0000000
--- a/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "utils.h"
-
-using ::android::acam::utils::native_handle_ptr_wrapper;
-struct ACameraOutputTarget {
-    explicit ACameraOutputTarget(const native_handle_t* window) : mWindow(window) {};
-
-    bool operator == (const ACameraOutputTarget& other) const {
-        return mWindow == other.mWindow;
-    }
-    bool operator != (const ACameraOutputTarget& other) const {
-        return mWindow != other.mWindow;
-    }
-    bool operator < (const ACameraOutputTarget& other) const {
-        return mWindow < other.mWindow;
-    }
-    bool operator > (const ACameraOutputTarget& other) const {
-        return mWindow > other.mWindow;
-    }
-
-    native_handle_ptr_wrapper mWindow;
-};
diff --git a/camera/ndk/ndk_vendor/impl/utils.cpp b/camera/ndk/ndk_vendor/impl/utils.cpp
index 73a527b..3971c73 100644
--- a/camera/ndk/ndk_vendor/impl/utils.cpp
+++ b/camera/ndk/ndk_vendor/impl/utils.cpp
@@ -18,7 +18,6 @@
 
 #include "utils.h"
 
-#include <aidlcommonsupport/NativeHandle.h>
 #include <utils/Log.h>
 
 namespace android {
@@ -138,51 +137,6 @@
     return ret;
 }
 
-bool isWindowNativeHandleEqual(const native_handle_t *nh1, const native_handle_t *nh2) {
-    if (nh1->numFds !=0 || nh2->numFds !=0) {
-        ALOGE("Invalid window native handles being compared");
-        return false;
-    }
-    if (nh1->version != nh2->version || nh1->numFds != nh2->numFds ||
-        nh1->numInts != nh2->numInts) {
-        return false;
-    }
-    for (int i = 0; i < nh1->numInts; i++) {
-        if(nh1->data[i] != nh2->data[i]) {
-            return false;
-        }
-    }
-    return true;
-}
-
-bool isWindowNativeHandleEqual(const native_handle_t *nh1,
-                               const aidl::android::hardware::common::NativeHandle& nh2) {
-    native_handle_t* tempNh = makeFromAidl(nh2);
-    bool equal = isWindowNativeHandleEqual(nh1, tempNh);
-    native_handle_delete(tempNh);
-    return equal;
-}
-
-bool isWindowNativeHandleLessThan(const native_handle_t *nh1, const native_handle_t *nh2) {
-    if (isWindowNativeHandleEqual(nh1, nh2)) {
-        return false;
-    }
-    if (nh1->numInts != nh2->numInts) {
-        return nh1->numInts < nh2->numInts;
-    }
-
-    for (int i = 0; i < nh1->numInts; i++) {
-        if (nh1->data[i] != nh2->data[i]) {
-            return nh1->data[i] < nh2->data[i];
-        }
-    }
-    return false;
-}
-
-bool isWindowNativeHandleGreaterThan(const native_handle_t *nh1, const native_handle_t *nh2) {
-    return !isWindowNativeHandleLessThan(nh1, nh2) && !isWindowNativeHandleEqual(nh1, nh2);
-}
-
 } // namespace utils
 } // namespace acam
 } // namespace android
diff --git a/camera/ndk/ndk_vendor/impl/utils.h b/camera/ndk/ndk_vendor/impl/utils.h
index 7ad74ad..d0dd2fc 100644
--- a/camera/ndk/ndk_vendor/impl/utils.h
+++ b/camera/ndk/ndk_vendor/impl/utils.h
@@ -38,53 +38,14 @@
 using ::aidl::android::frameworks::cameraservice::device::OutputConfiguration;
 using ::aidl::android::frameworks::cameraservice::device::PhysicalCameraSettings;
 using ::aidl::android::frameworks::cameraservice::device::TemplateId;
-using ::aidl::android::hardware::common::NativeHandle;
 using ::android::hardware::camera::common::V1_0::helper::CameraMetadata;
 using AidlCameraMetadata = ::aidl::android::frameworks::cameraservice::device::CameraMetadata;
 using AidlCaptureRequest = ::aidl::android::frameworks::cameraservice::device::CaptureRequest;
 
-bool isWindowNativeHandleEqual(const native_handle_t *nh1, const native_handle_t *nh2);
-
-bool isWindowNativeHandleEqual(const native_handle_t* nh1, const NativeHandle& nh2);
-
-bool isWindowNativeHandleLessThan(const native_handle_t *nh1, const native_handle_t *nh2);
-
-// Convenience wrapper over isWindowNativeHandleLessThan and isWindowNativeHandleEqual
-bool isWindowNativeHandleGreaterThan(const native_handle_t *nh1, const native_handle_t *nh2);
-
-// Utility class so the native_handle_t can be compared with  its contents instead
-// of just raw pointer comparisons.
-struct native_handle_ptr_wrapper {
-    const native_handle_t *mWindow = nullptr;
-
-    native_handle_ptr_wrapper(const native_handle_t *nh) : mWindow(nh) { }
-
-    native_handle_ptr_wrapper() = default;
-
-    operator const native_handle_t *() const { return mWindow; }
-
-    bool operator ==(const native_handle_ptr_wrapper other) const {
-        return isWindowNativeHandleEqual(mWindow, other.mWindow);
-    }
-
-    bool operator != (const native_handle_ptr_wrapper& other) const {
-        return !isWindowNativeHandleEqual(mWindow, other.mWindow);
-    }
-
-    bool operator < (const native_handle_ptr_wrapper& other) const {
-        return isWindowNativeHandleLessThan(mWindow, other.mWindow);
-    }
-
-    bool operator > (const native_handle_ptr_wrapper& other) const {
-        return !isWindowNativeHandleGreaterThan(mWindow, other.mWindow);
-    }
-
-};
-
 // Utility class so that CaptureRequest can be stored by sp<>
 struct CaptureRequest: public RefBase {
   AidlCaptureRequest mCaptureRequest;
-  std::vector<native_handle_ptr_wrapper> mSurfaceList;
+  std::vector<ANativeWindow *> mSurfaceList;
   // Physical camera settings metadata is stored here, as the capture request
   // might not contain it. That's since, fmq might have consumed it.
   std::vector<PhysicalCameraSettings> mPhysicalCameraSettings;
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 74c6cad..0259359 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -31,8 +31,6 @@
 #include <stdio.h>
 
 #include <android/log.h>
-#include <android/hidl/manager/1.2/IServiceManager.h>
-#include <android/hidl/token/1.0/ITokenManager.h>
 #include <camera/NdkCameraError.h>
 #include <camera/NdkCameraManager.h>
 #include <camera/NdkCameraDevice.h>
@@ -40,7 +38,6 @@
 #include <hidl/ServiceManagement.h>
 #include <media/NdkImage.h>
 #include <media/NdkImageReader.h>
-#include <cutils/native_handle.h>
 #include <VendorTagDescriptor.h>
 
 namespace {
@@ -53,9 +50,7 @@
 static constexpr int kTestImageFormat = AIMAGE_FORMAT_YUV_420_888;
 
 using android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
-using android::hidl::manager::V1_0::IServiceManager;
-using android::hidl::token::V1_0::ITokenManager;
-using ConfiguredWindows = std::set<const native_handle_t *>;
+using ConfiguredWindows = std::set<ANativeWindow*>;
 
 class CameraHelper {
    public:
@@ -65,11 +60,11 @@
 
     struct PhysicalImgReaderInfo {
         const char* physicalCameraId;
-        const native_handle_t* anw;
+        ANativeWindow* anw;
     };
 
     // Retaining the error code in case the caller needs to analyze it.
-    std::variant<int, ConfiguredWindows> initCamera(const native_handle_t* imgReaderAnw,
+    std::variant<int, ConfiguredWindows> initCamera(ANativeWindow* imgReaderAnw,
             const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
             bool usePhysicalSettings, bool prepareWindows = false) {
         ConfiguredWindows configuredWindows;
@@ -109,7 +104,7 @@
         }
         configuredWindows.insert(mImgReaderAnw);
         std::vector<const char*> idPointerList;
-        std::set<const native_handle_t*> physicalStreamMap;
+        std::set<ANativeWindow*> physicalStreamMap;
         for (auto& physicalStream : physicalImgReaders) {
             ACaptureSessionOutput* sessionOutput = nullptr;
             ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
@@ -301,7 +296,7 @@
 
 
    private:
-    static void onPreparedCb(void* obj, ACameraWindowType *anw, ACameraCaptureSession *session) {
+    static void onPreparedCb(void* obj, ANativeWindow *anw, ACameraCaptureSession *session) {
         CameraHelper* thiz = reinterpret_cast<CameraHelper*>(obj);
         thiz->handlePrepared(anw, session);
     }
@@ -317,7 +312,7 @@
         return ret;
     }
 
-    void handlePrepared(ACameraWindowType *anw, ACameraCaptureSession *session) {
+    void handlePrepared(ANativeWindow *anw, ACameraCaptureSession *session) {
         // Reduce the pending prepared count of anw by 1. If count is  0, remove the key.
         std::lock_guard<std::mutex> lock(mMutex);
         if (session != mSession) {
@@ -334,7 +329,7 @@
             mPendingPreparedCbs.erase(anw);
         }
     }
-    void incPendingPrepared(ACameraWindowType *anw) {
+    void incPendingPrepared(ANativeWindow *anw) {
         std::lock_guard<std::mutex> lock(mMutex);
         if ((mPendingPreparedCbs.find(anw) == mPendingPreparedCbs.end())) {
             mPendingPreparedCbs[anw] = 1;
@@ -344,13 +339,13 @@
     }
 
     // ANW -> pending prepared callbacks
-    std::unordered_map<ACameraWindowType *, int> mPendingPreparedCbs;
+    std::unordered_map<ANativeWindow*, int> mPendingPreparedCbs;
     ACameraDevice_StateCallbacks mDeviceCb{this, nullptr, nullptr};
     ACameraCaptureSession_stateCallbacks mSessionCb{ this, nullptr, nullptr, nullptr};
 
     ACameraCaptureSession_prepareCallback mPreparedCb = &onPreparedCb;
 
-    const native_handle_t* mImgReaderAnw = nullptr;  // not owned by us.
+    ANativeWindow* mImgReaderAnw = nullptr;  // not owned by us.
 
     // Camera device
     ACameraDevice* mDevice = nullptr;
@@ -484,7 +479,7 @@
     ~ImageReaderTestCase() {
         if (mImgReaderAnw) {
             AImageReader_delete(mImgReader);
-            // No need to call native_handle_t_release on imageReaderAnw
+            // No need to release mImgReaderAnw; the ANativeWindow is owned by the AImageReader.
         }
     }
 
@@ -514,17 +509,18 @@
             return ret;
         }
 
-        ret = AImageReader_getWindowNativeHandle(mImgReader, &mImgReaderAnw);
+
+        ret = AImageReader_getWindow(mImgReader, &mImgReaderAnw);
         if (ret != AMEDIA_OK || mImgReaderAnw == nullptr) {
-            ALOGE("Failed to get native_handle_t from AImageReader, ret=%d, mImgReaderAnw=%p.", ret,
-                  mImgReaderAnw);
+            ALOGE("Failed to get ANativeWindow* from AImageReader, ret=%d, mImgReader=%p.", ret,
+                  mImgReader);
             return -1;
         }
 
         return 0;
     }
 
-    const native_handle_t* getNativeWindow() { return mImgReaderAnw; }
+    ANativeWindow* getNativeWindow() { return mImgReaderAnw; }
 
     int getAcquiredImageCount() {
         std::lock_guard<std::mutex> lock(mMutex);
@@ -657,7 +653,7 @@
     int mAcquiredImageCount{0};
 
     AImageReader* mImgReader = nullptr;
-    native_handle_t* mImgReaderAnw = nullptr;
+    ANativeWindow* mImgReaderAnw = nullptr;
 
     AImageReader_ImageListener mReaderAvailableCb{this, onImageAvailable};
     AImageReader_BufferRemovedListener mReaderDetachedCb{this, onBufferRemoved};
@@ -985,20 +981,12 @@
 
 
 
-TEST_F(AImageReaderVendorTest, CreateWindowNativeHandle) {
-    auto transport = android::hardware::defaultServiceManager()->getTransport(ITokenManager::descriptor, "default");
-    if (transport.isOk() && transport == IServiceManager::Transport::EMPTY) {
-        GTEST_SKIP() << "This device no longer supports AImageReader_getWindowNativeHandle";
-    }
+TEST_F(AImageReaderVendorTest, CreateANativeWindow) {
     testBasicTakePictures(/*prepareSurfaces*/ false);
     testBasicTakePictures(/*prepareSurfaces*/ true);
 }
 
 TEST_F(AImageReaderVendorTest, LogicalCameraPhysicalStream) {
-    auto transport = android::hardware::defaultServiceManager()->getTransport(ITokenManager::descriptor, "default");
-    if (transport.isOk() && transport == IServiceManager::Transport::EMPTY) {
-        GTEST_SKIP() << "This device no longer supports AImageReader_getWindowNativeHandle";
-    }
     for (auto & v2 : {true, false}) {
         testLogicalCameraPhysicalStream(false/*usePhysicalSettings*/, v2);
         testLogicalCameraPhysicalStream(true/*usePhysicalSettings*/, v2);
diff --git a/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp b/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
index 07efc20..8371905 100644
--- a/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
@@ -16,14 +16,19 @@
 
 #include <CameraParameters.h>
 #include <CameraParameters2.h>
+#include <camera/StringUtils.h>
 #include <fcntl.h>
 #include <fuzzer/FuzzedDataProvider.h>
 #include <utils/String16.h>
 #include <camera/StringUtils.h>
 
+#include <functional>
+
 using namespace std;
 using namespace android;
 
+constexpr int8_t kMaxBytes = 20;
+
 string kValidFormats[] = {
         CameraParameters::PIXEL_FORMAT_YUV422SP,      CameraParameters::PIXEL_FORMAT_YUV420SP,
         CameraParameters::PIXEL_FORMAT_YUV422I,       CameraParameters::PIXEL_FORMAT_YUV420P,
@@ -34,26 +39,22 @@
 class CameraParametersFuzzer {
   public:
     void process(const uint8_t* data, size_t size);
-    ~CameraParametersFuzzer() {
-        delete mCameraParameters;
-        delete mCameraParameters2;
-    }
 
   private:
     void invokeCameraParameters();
     template <class type>
-    void initCameraParameters(type** obj);
+    void initCameraParameters(unique_ptr<type>& obj);
     template <class type>
-    void cameraParametersCommon(type* obj);
-    CameraParameters* mCameraParameters = nullptr;
-    CameraParameters2* mCameraParameters2 = nullptr;
+    void callCameraParametersAPIs(unique_ptr<type>& obj);
+    unique_ptr<CameraParameters> mCameraParameters;
+    unique_ptr<CameraParameters2> mCameraParameters2;
     FuzzedDataProvider* mFDP = nullptr;
 };
 
 template <class type>
-void CameraParametersFuzzer::initCameraParameters(type** obj) {
+void CameraParametersFuzzer::initCameraParameters(unique_ptr<type>& obj) {
     if (mFDP->ConsumeBool()) {
-        *obj = new type();
+        obj = make_unique<type>();
     } else {
         string params;
         if (mFDP->ConsumeBool()) {
@@ -61,94 +62,176 @@
             int32_t height = mFDP->ConsumeIntegral<int32_t>();
             int32_t minFps = mFDP->ConsumeIntegral<int32_t>();
             int32_t maxFps = mFDP->ConsumeIntegral<int32_t>();
-            params = CameraParameters::KEY_SUPPORTED_VIDEO_SIZES;
+            params = mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                         : CameraParameters::KEY_SUPPORTED_VIDEO_SIZES;
             params += '=' + to_string(width) + 'x' + to_string(height) + ';';
             if (mFDP->ConsumeBool()) {
-                params += CameraParameters::KEY_PREVIEW_FPS_RANGE;
+                params += mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                              : CameraParameters::KEY_PREVIEW_FPS_RANGE;
                 params += '=' + to_string(minFps) + ',' + to_string(maxFps) + ';';
             }
             if (mFDP->ConsumeBool()) {
-                params += CameraParameters::KEY_SUPPORTED_PICTURE_SIZES;
+                params += mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                              : CameraParameters::KEY_SUPPORTED_PICTURE_SIZES;
                 params += '=' + to_string(width) + 'x' + to_string(height) + ';';
             }
             if (mFDP->ConsumeBool()) {
-                params += CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS;
-                params += '=' + mFDP->PickValueInArray(kValidFormats) + ';';
+                params += mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                              : CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS;
+                params += '=' +
+                          (mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                               : mFDP->PickValueInArray(kValidFormats)) + ';';
             }
         } else {
-            params = mFDP->ConsumeRandomLengthString();
+            params = mFDP->ConsumeRandomLengthString(kMaxBytes);
         }
-        *obj = new type(toString8(params));
+        obj = make_unique<type>(toString8(params));
     }
 }
 
 template <class type>
-void CameraParametersFuzzer::cameraParametersCommon(type* obj) {
-    Vector<Size> supportedPreviewSizes;
-    obj->getSupportedPreviewSizes(supportedPreviewSizes);
-    int32_t previewWidth = mFDP->ConsumeIntegral<int32_t>();
-    int32_t previewHeight = mFDP->ConsumeIntegral<int32_t>();
-    obj->setPreviewSize(previewWidth, previewHeight);
-    obj->getPreviewSize(&previewWidth, &previewHeight);
-
+void CameraParametersFuzzer::callCameraParametersAPIs(unique_ptr<type>& obj) {
     Vector<Size> supportedVideoSizes;
-    obj->getSupportedVideoSizes(supportedVideoSizes);
-    if (supportedVideoSizes.size() != 0) {
-        int32_t videoWidth, videoHeight, preferredVideoWidth, preferredVideoHeight;
-        if (mFDP->ConsumeBool()) {
-            int32_t idx = mFDP->ConsumeIntegralInRange<int32_t>(0, supportedVideoSizes.size() - 1);
-            obj->setVideoSize(supportedVideoSizes[idx].width, supportedVideoSizes[idx].height);
-        } else {
-            videoWidth = mFDP->ConsumeIntegral<int32_t>();
-            videoHeight = mFDP->ConsumeIntegral<int32_t>();
-            obj->setVideoSize(videoWidth, videoHeight);
-        }
-        obj->getVideoSize(&videoWidth, &videoHeight);
-        obj->getPreferredPreviewSizeForVideo(&preferredVideoWidth, &preferredVideoHeight);
-    }
-
-    int32_t fps = mFDP->ConsumeIntegral<int32_t>();
-    obj->setPreviewFrameRate(fps);
-    obj->getPreviewFrameRate();
-    string previewFormat = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
-                                               : mFDP->ConsumeRandomLengthString();
-    obj->setPreviewFormat(previewFormat.c_str());
-
-    int32_t pictureWidth = mFDP->ConsumeIntegral<int32_t>();
-    int32_t pictureHeight = mFDP->ConsumeIntegral<int32_t>();
-    Vector<Size> supportedPictureSizes;
-    obj->setPictureSize(pictureWidth, pictureHeight);
-    obj->getPictureSize(&pictureWidth, &pictureHeight);
-    obj->getSupportedPictureSizes(supportedPictureSizes);
-    string pictureFormat = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
-                                               : mFDP->ConsumeRandomLengthString();
-    obj->setPictureFormat(pictureFormat.c_str());
-    obj->getPictureFormat();
-
-    if (mFDP->ConsumeBool()) {
-        obj->dump();
-    } else {
-        int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
-        Vector<String16> args = {};
-        obj->dump(fd, args);
-        close(fd);
+    while (mFDP->remaining_bytes()) {
+        auto callCameraUtilsAPIs = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() {
+                    Vector<Size> supportedPreviewSizes;
+                    obj->getSupportedPreviewSizes(supportedPreviewSizes);
+                },
+                [&]() {
+                    int32_t previewWidth = mFDP->ConsumeIntegral<int32_t>();
+                    int32_t previewHeight = mFDP->ConsumeIntegral<int32_t>();
+                    obj->setPreviewSize(previewWidth, previewHeight);
+                },
+                [&]() {
+                    int32_t previewWidth, previewHeight;
+                    obj->getPreviewSize(&previewWidth, &previewHeight);
+                },
+                [&]() { obj->getSupportedVideoSizes(supportedVideoSizes); },
+                [&]() {
+                    int32_t videoWidth, videoHeight;
+                    if (supportedVideoSizes.size()) {
+                        int32_t idx = mFDP->ConsumeIntegralInRange<int32_t>(
+                                0, supportedVideoSizes.size() - 1);
+                        videoWidth = mFDP->ConsumeBool() ? supportedVideoSizes[idx].width
+                                                         : mFDP->ConsumeIntegral<int32_t>();
+                        videoHeight = mFDP->ConsumeBool() ? supportedVideoSizes[idx].height
+                                                          : mFDP->ConsumeIntegral<int32_t>();
+                        obj->setVideoSize(videoWidth, videoHeight);
+                    }
+                },
+                [&]() {
+                    int32_t videoWidth, videoHeight;
+                    obj->getVideoSize(&videoWidth, &videoHeight);
+                },
+                [&]() {
+                    int32_t preferredVideoWidth, preferredVideoHeight;
+                    obj->getPreferredPreviewSizeForVideo(&preferredVideoWidth,
+                                                         &preferredVideoHeight);
+                },
+                [&]() {
+                    int32_t fps = mFDP->ConsumeIntegral<int32_t>();
+                    obj->setPreviewFrameRate(fps);
+                },
+                [&]() { obj->getPreviewFrameRate(); },
+                [&]() {
+                    string previewFormat = mFDP->ConsumeBool()
+                                                   ? mFDP->PickValueInArray(kValidFormats)
+                                                   : mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->setPreviewFormat(previewFormat.c_str());
+                },
+                [&]() {
+                    int32_t pictureWidth = mFDP->ConsumeIntegral<int32_t>();
+                    int32_t pictureHeight = mFDP->ConsumeIntegral<int32_t>();
+                    obj->setPictureSize(pictureWidth, pictureHeight);
+                },
+                [&]() {
+                    int32_t pictureWidth, pictureHeight;
+                    obj->getPictureSize(&pictureWidth, &pictureHeight);
+                },
+                [&]() {
+                    Vector<Size> supportedPictureSizes;
+                    obj->getSupportedPictureSizes(supportedPictureSizes);
+                },
+                [&]() {
+                    string pictureFormat = mFDP->ConsumeBool()
+                                                   ? mFDP->PickValueInArray(kValidFormats)
+                                                   : mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->setPictureFormat(pictureFormat.c_str());
+                },
+                [&]() { obj->getPictureFormat(); },
+                [&]() {
+                    if (mFDP->ConsumeBool()) {
+                        obj->dump();
+                    } else {
+                        int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+                        Vector<String16> args = {};
+                        obj->dump(fd, args);
+                        close(fd);
+                    }
+                },
+                [&]() { obj->flatten(); },
+                [&]() {
+                    string key = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    float value = mFDP->ConsumeFloatingPoint<float>();
+                    obj->setFloat(key.c_str(), value);
+                },
+                [&]() {
+                    string key = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->getFloat(key.c_str());
+                },
+                [&]() { obj->getPreviewFormat(); },
+                [&]() {
+                    string key = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->remove(key.c_str());
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters>) {
+                        string format = mFDP->ConsumeBool()
+                                                ? mFDP->ConsumeRandomLengthString(kMaxBytes)
+                                                : mFDP->PickValueInArray(kValidFormats);
+                        mCameraParameters->previewFormatToEnum(format.c_str());
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters>) {
+                        mCameraParameters->isEmpty();
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters>) {
+                        Vector<int32_t> formats;
+                        mCameraParameters->getSupportedPreviewFormats(formats);
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters2>) {
+                        string key1 = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        string key2 = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        int32_t order;
+                        mCameraParameters2->compareSetOrder(key1.c_str(), key2.c_str(), &order);
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters2>) {
+                        int32_t minFps = mFDP->ConsumeIntegral<int32_t>();
+                        int32_t maxFps = mFDP->ConsumeIntegral<int32_t>();
+                        mCameraParameters2->setPreviewFpsRange(minFps, maxFps);
+                    }
+                },
+        });
+        callCameraUtilsAPIs();
     }
 }
 
 void CameraParametersFuzzer::invokeCameraParameters() {
-    initCameraParameters<CameraParameters>(&mCameraParameters);
-    cameraParametersCommon<CameraParameters>(mCameraParameters);
-    initCameraParameters<CameraParameters2>(&mCameraParameters2);
-    cameraParametersCommon<CameraParameters2>(mCameraParameters2);
-
-    int32_t minFPS, maxFPS;
-    mCameraParameters->getPreviewFpsRange(&minFPS, &maxFPS);
-    string format = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
-                                        : mFDP->ConsumeRandomLengthString();
-    mCameraParameters->previewFormatToEnum(format.c_str());
-    mCameraParameters->isEmpty();
-    Vector<int32_t> formats;
-    mCameraParameters->getSupportedPreviewFormats(formats);
+    if (mFDP->ConsumeBool()) {
+        initCameraParameters<CameraParameters>(mCameraParameters);
+        callCameraParametersAPIs(mCameraParameters);
+    } else {
+        initCameraParameters<CameraParameters2>(mCameraParameters2);
+        callCameraParametersAPIs(mCameraParameters2);
+    }
 }
 
 void CameraParametersFuzzer::process(const uint8_t* data, size_t size) {
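For reference, the pattern introduced above replaces a fixed call sequence with a data-driven
dispatch loop: the fuzzer keeps picking one API at random from an array of lambdas until the
fuzz input is exhausted, so a single corpus entry can exercise many call orders. A minimal
sketch of that loop follows, assuming libFuzzer's FuzzedDataProvider helper; the Widget class
and kMaxApiBytes constant are hypothetical stand-ins for the real fuzz targets.

#include <fuzzer/FuzzedDataProvider.h>

#include <functional>
#include <string>

namespace {
constexpr size_t kMaxApiBytes = 256;  // hypothetical cap on consumed strings

class Widget {
  public:
    void setName(const std::string& name) { mName = name; }
    void clear() { mName.clear(); }

  private:
    std::string mName;
};
}  // namespace

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    FuzzedDataProvider fdp(data, size);
    Widget widget;
    while (fdp.remaining_bytes()) {
        // Pick one operation per iteration; the choice itself consumes fuzz data.
        auto callApi = fdp.PickValueInArray<const std::function<void()>>({
                [&]() { widget.setName(fdp.ConsumeRandomLengthString(kMaxApiBytes)); },
                [&]() { widget.clear(); },
        });
        callApi();
    }
    return 0;
}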
diff --git a/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp b/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
index 494ec1b..5ad9530 100644
--- a/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
@@ -44,7 +44,7 @@
     }
 
     for (size_t idx = 0; idx < physicalCameraSettingsSize; ++idx) {
-        string id = fdp.ConsumeRandomLengthString();
+        string id = fdp.ConsumeRandomLengthString(kMaxBytes);
         if (fdp.ConsumeBool()) {
             parcelCamCaptureReq.writeString16(toString16(id));
         }
@@ -120,7 +120,11 @@
         }
     }
 
-    invokeReadWriteParcelsp<CaptureRequest>(captureRequest);
+    if (fdp.ConsumeBool()) {
+        invokeReadWriteParcelsp<CaptureRequest>(captureRequest);
+    } else {
+        invokeNewReadWriteParcelsp<CaptureRequest>(captureRequest, fdp);
+    }
     invokeReadWriteNullParcelsp<CaptureRequest>(captureRequest);
     parcelCamCaptureReq.setDataPosition(0);
     captureRequest->readFromParcel(&parcelCamCaptureReq);
diff --git a/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp b/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
index 2fe9a94..7046075 100644
--- a/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
@@ -26,85 +26,122 @@
 using namespace android;
 using namespace android::hardware::camera2::params;
 
+constexpr int8_t kMaxLoopIterations = 100;
 constexpr int32_t kSizeMin = 0;
 constexpr int32_t kSizeMax = 1000;
 
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
-    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+class C2OutputConfigurationFuzzer {
+  public:
+    void process(const uint8_t* data, size_t size);
 
-    OutputConfiguration* outputConfiguration = nullptr;
+  private:
+    void invokeC2OutputConfigFuzzer();
+    unique_ptr<OutputConfiguration> getC2OutputConfig();
+    sp<IGraphicBufferProducer> createIGraphicBufferProducer();
+    FuzzedDataProvider* mFDP = nullptr;
+};
 
-    if (fdp.ConsumeBool()) {
-        outputConfiguration = new OutputConfiguration();
+sp<IGraphicBufferProducer> C2OutputConfigurationFuzzer::createIGraphicBufferProducer() {
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    sp<SurfaceControl> surfaceControl = composerClient->createSurface(
+            static_cast<String8>(mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()) /* name */,
+            mFDP->ConsumeIntegral<uint32_t>() /* width */,
+            mFDP->ConsumeIntegral<uint32_t>() /* height */,
+            mFDP->ConsumeIntegral<int32_t>() /* format */,
+            mFDP->ConsumeIntegral<int32_t>() /* flags */);
+    if (surfaceControl) {
+        sp<Surface> surface = surfaceControl->getSurface();
+        return surface->getIGraphicBufferProducer();
     } else {
-        int32_t rotation = fdp.ConsumeIntegral<int32_t>();
-        string physicalCameraId = fdp.ConsumeRandomLengthString();
-        int32_t surfaceSetID = fdp.ConsumeIntegral<int32_t>();
-        bool isShared = fdp.ConsumeBool();
-
-        if (fdp.ConsumeBool()) {
-            sp<IGraphicBufferProducer> iGBP = nullptr;
-            sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
-            sp<SurfaceControl> surfaceControl = composerClient->createSurface(
-                    static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
-                    fdp.ConsumeIntegral<uint32_t>() /* width */,
-                    fdp.ConsumeIntegral<uint32_t>() /* height */,
-                    fdp.ConsumeIntegral<int32_t>() /* format */,
-                    fdp.ConsumeIntegral<int32_t>() /* flags */);
-            if (surfaceControl) {
-                sp<Surface> surface = surfaceControl->getSurface();
-                iGBP = surface->getIGraphicBufferProducer();
-            }
-            outputConfiguration = new OutputConfiguration(iGBP, rotation, physicalCameraId,
-                                                          surfaceSetID, isShared);
-            iGBP.clear();
-            composerClient.clear();
-            surfaceControl.clear();
-        } else {
-            size_t iGBPSize = fdp.ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
-            vector<sp<IGraphicBufferProducer>> iGBPs;
-            for (size_t idx = 0; idx < iGBPSize; ++idx) {
-                sp<IGraphicBufferProducer> iGBP = nullptr;
-                sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
-                sp<SurfaceControl> surfaceControl = composerClient->createSurface(
-                        static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
-                        fdp.ConsumeIntegral<uint32_t>() /* width */,
-                        fdp.ConsumeIntegral<uint32_t>() /* height */,
-                        fdp.ConsumeIntegral<int32_t>() /* format */,
-                        fdp.ConsumeIntegral<int32_t>() /* flags */);
-                if (surfaceControl) {
-                    sp<Surface> surface = surfaceControl->getSurface();
-                    iGBP = surface->getIGraphicBufferProducer();
-                    iGBPs.push_back(iGBP);
-                }
-                iGBP.clear();
-                composerClient.clear();
-                surfaceControl.clear();
-            }
-            outputConfiguration = new OutputConfiguration(iGBPs, rotation, physicalCameraId,
-                                                          surfaceSetID, isShared);
-        }
+        sp<IGraphicBufferProducer> gbp;
+        return gbp;
     }
+}
 
-    outputConfiguration->getRotation();
-    outputConfiguration->getSurfaceSetID();
-    outputConfiguration->getSurfaceType();
-    outputConfiguration->getWidth();
-    outputConfiguration->getHeight();
-    outputConfiguration->isDeferred();
-    outputConfiguration->isShared();
-    outputConfiguration->getPhysicalCameraId();
+unique_ptr<OutputConfiguration> C2OutputConfigurationFuzzer::getC2OutputConfig() {
+    unique_ptr<OutputConfiguration> outputConfiguration = nullptr;
+    auto selectOutputConfigurationConstructor =
+            mFDP->PickValueInArray<const std::function<void()>>({
+                    [&]() { outputConfiguration = make_unique<OutputConfiguration>(); },
 
-    OutputConfiguration outputConfiguration2;
-    outputConfiguration->gbpsEqual(outputConfiguration2);
-    outputConfiguration->sensorPixelModesUsedEqual(outputConfiguration2);
-    outputConfiguration->gbpsLessThan(outputConfiguration2);
-    outputConfiguration->sensorPixelModesUsedLessThan(outputConfiguration2);
-    outputConfiguration->getGraphicBufferProducers();
-    sp<IGraphicBufferProducer> gbp;
-    outputConfiguration->addGraphicProducer(gbp);
-    invokeReadWriteNullParcel<OutputConfiguration>(outputConfiguration);
-    invokeReadWriteParcel<OutputConfiguration>(outputConfiguration);
-    delete outputConfiguration;
+                    [&]() {
+                        int32_t rotation = mFDP->ConsumeIntegral<int32_t>();
+                        string physicalCameraId = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        int32_t surfaceSetID = mFDP->ConsumeIntegral<int32_t>();
+                        bool isShared = mFDP->ConsumeBool();
+                        sp<IGraphicBufferProducer> iGBP = createIGraphicBufferProducer();
+                        outputConfiguration = make_unique<OutputConfiguration>(
+                                iGBP, rotation, physicalCameraId, surfaceSetID, isShared);
+                    },
+
+                    [&]() {
+                        int32_t rotation = mFDP->ConsumeIntegral<int32_t>();
+                        string physicalCameraId = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        int32_t surfaceSetID = mFDP->ConsumeIntegral<int32_t>();
+                        bool isShared = mFDP->ConsumeBool();
+                        size_t iGBPSize = mFDP->ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+                        vector<sp<IGraphicBufferProducer>> iGBPs;
+                        for (size_t idx = 0; idx < iGBPSize; ++idx) {
+                            sp<IGraphicBufferProducer> iGBP = createIGraphicBufferProducer();
+                            iGBPs.push_back(iGBP);
+                        }
+                        outputConfiguration = make_unique<OutputConfiguration>(
+                                iGBPs, rotation, physicalCameraId, surfaceSetID, isShared);
+                    },
+            });
+    selectOutputConfigurationConstructor();
+    return outputConfiguration;
+}
+
+void C2OutputConfigurationFuzzer::invokeC2OutputConfigFuzzer() {
+    unique_ptr<OutputConfiguration> outputConfiguration = getC2OutputConfig();
+    int8_t count = kMaxLoopIterations;
+    while (--count > 0) {
+        unique_ptr<OutputConfiguration> outputConfiguration2 = getC2OutputConfig();
+        auto callC2OutputConfAPIs = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() { outputConfiguration->getRotation(); },
+                [&]() { outputConfiguration->getSurfaceSetID(); },
+                [&]() { outputConfiguration->getSurfaceType(); },
+                [&]() { outputConfiguration->getWidth(); },
+                [&]() { outputConfiguration->getHeight(); },
+                [&]() { outputConfiguration->isDeferred(); },
+                [&]() { outputConfiguration->isShared(); },
+                [&]() { outputConfiguration->getPhysicalCameraId(); },
+                [&]() { outputConfiguration->gbpsEqual(*outputConfiguration2); },
+                [&]() { outputConfiguration->sensorPixelModesUsedEqual(*outputConfiguration2); },
+                [&]() { outputConfiguration->gbpsLessThan(*outputConfiguration2); },
+                [&]() { outputConfiguration->sensorPixelModesUsedLessThan(*outputConfiguration2); },
+                [&]() { outputConfiguration->getGraphicBufferProducers(); },
+                [&]() {
+                    sp<IGraphicBufferProducer> gbp = createIGraphicBufferProducer();
+                    outputConfiguration->addGraphicProducer(gbp);
+                },
+                [&]() { outputConfiguration->isMultiResolution(); },
+                [&]() { outputConfiguration->getColorSpace(); },
+                [&]() { outputConfiguration->getStreamUseCase(); },
+                [&]() { outputConfiguration->getTimestampBase(); },
+                [&]() { outputConfiguration->getMirrorMode(); },
+                [&]() { outputConfiguration->useReadoutTimestamp(); },
+        });
+        callC2OutputConfAPIs();
+    }
+    // Do not keep invokeReadWrite() APIs in the while loop to avoid possible OOM.
+    invokeReadWriteNullParcel<OutputConfiguration>(outputConfiguration.get());
+    if (mFDP->ConsumeBool()) {
+        invokeReadWriteParcel<OutputConfiguration>(outputConfiguration.get());
+    } else {
+        invokeNewReadWriteParcel<OutputConfiguration>(outputConfiguration.get(), *mFDP);
+    }
+}
+
+void C2OutputConfigurationFuzzer::process(const uint8_t* data, size_t size) {
+    mFDP = new FuzzedDataProvider(data, size);
+    invokeC2OutputConfigFuzzer();
+    delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    C2OutputConfigurationFuzzer c2OutputConfigurationFuzzer;
+    c2OutputConfigurationFuzzer.process(data, size);
     return 0;
 }
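The invokeReadWriteParcel<T>() and invokeReadWriteParcelsp<T>() helpers used above are defined
in camera2common.h and are not part of this patch. As a rough illustration of the round trip
they exercise, assuming T implements the standard libbinder Parcelable interface
(writeToParcel/readFromParcel), a sketch looks like this:

#include <binder/Parcel.h>

template <class T>
void roundTripThroughParcel(T* obj) {
    android::Parcel parcel;
    obj->writeToParcel(&parcel);   // marshal the object
    parcel.setDataPosition(0);     // rewind so the read starts at the beginning
    obj->readFromParcel(&parcel);  // unmarshal back into the same object
}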
diff --git a/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp b/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
index dc40b0f..c588f11 100644
--- a/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
@@ -27,6 +27,10 @@
     SubmitInfo submitInfo;
     submitInfo.mRequestId = fdp.ConsumeIntegral<int32_t>();
     submitInfo.mLastFrameNumber = fdp.ConsumeIntegral<int64_t>();
-    invokeReadWriteParcel<SubmitInfo>(&submitInfo);
+    if (fdp.ConsumeBool()) {
+        invokeReadWriteParcel<SubmitInfo>(&submitInfo);
+    } else {
+        invokeNewReadWriteParcel<SubmitInfo>(&submitInfo, fdp);
+    }
     return 0;
 }
diff --git a/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp b/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
index e14d9ce..3131f1d 100644
--- a/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
@@ -29,6 +29,8 @@
 constexpr int32_t kRangeMin = 0;
 constexpr int32_t kRangeMax = 1000;
 constexpr int32_t kVendorTagDescriptorId = -1;
+constexpr int8_t kMinLoopIterations = 1;
+constexpr int8_t kMaxLoopIterations = 50;
 
 extern "C" {
 
@@ -95,39 +97,63 @@
     initVendorTagDescriptor();
 
     sp<VendorTagDescriptor> vdesc = new VendorTagDescriptor();
-    vdesc->copyFrom(*mVendorTagDescriptor);
-    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(mVendorTagDescriptor);
-    VendorTagDescriptor::getGlobalVendorTagDescriptor();
 
-    int32_t tagCount = mVendorTagDescriptor->getTagCount();
-    if (tagCount > 0) {
-        uint32_t tagArray[tagCount];
-        mVendorTagDescriptor->getTagArray(tagArray);
-        uint32_t tag;
-        for (int32_t i = 0; i < tagCount; ++i) {
-            tag = tagArray[i];
-            get_local_camera_metadata_section_name_vendor_id(tag, kVendorTagDescriptorId);
-            get_local_camera_metadata_tag_name_vendor_id(tag, kVendorTagDescriptorId);
-            get_local_camera_metadata_tag_type_vendor_id(tag, kVendorTagDescriptorId);
-            mVendorTagDescriptor->getSectionIndex(tag);
-        }
-        mVendorTagDescriptor->getAllSectionNames();
+    int8_t count = mFDP->ConsumeIntegralInRange<int8_t>(kMinLoopIterations, kMaxLoopIterations);
+    while (--count > 0) {
+        auto callVendorTagDescriptor = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() {
+                    int32_t tagCount = mVendorTagDescriptor->getTagCount();
+                    if (tagCount > 0) {
+                        uint32_t tagArray[tagCount];
+                        mVendorTagDescriptor->getTagArray(tagArray);
+                        uint32_t tag;
+                        for (int32_t i = 0; i < tagCount; ++i) {
+                            tag = tagArray[i];
+                            get_local_camera_metadata_section_name_vendor_id(
+                                    tag, kVendorTagDescriptorId);
+                            get_local_camera_metadata_tag_name_vendor_id(tag,
+                                                                         kVendorTagDescriptorId);
+                            get_local_camera_metadata_tag_type_vendor_id(tag,
+                                                                         kVendorTagDescriptorId);
+                            mVendorTagDescriptor->getSectionIndex(tag);
+                        }
+                    }
+                },
+                [&]() {
+                    if (mVendorTagDescriptor->getTagCount() > 0) {
+                        mVendorTagDescriptor->getAllSectionNames();
+                    }
+                },
+                [&]() { vdesc->copyFrom(*mVendorTagDescriptor); },
+                [&]() {
+                    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(mVendorTagDescriptor);
+                },
+                [&]() { VendorTagDescriptor::getGlobalVendorTagDescriptor(); },
+                [&]() {
+                    String8 name((mFDP->ConsumeRandomLengthString()).c_str());
+                    String8 section((mFDP->ConsumeRandomLengthString()).c_str());
+                    uint32_t lookupTag;
+                    mVendorTagDescriptor->lookupTag(name, section, &lookupTag);
+                },
+                [&]() {
+                    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+                    int32_t verbosity = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+                    int32_t indentation =
+                            mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+                    mVendorTagDescriptor->dump(fd, verbosity, indentation);
+                    close(fd);
+                },
+        });
+        callVendorTagDescriptor();
     }
 
-    String8 name((mFDP->ConsumeRandomLengthString()).c_str());
-    String8 section((mFDP->ConsumeRandomLengthString()).c_str());
-    uint32_t lookupTag;
-    mVendorTagDescriptor->lookupTag(name, section, &lookupTag);
-
-    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
-    int32_t verbosity = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
-    int32_t indentation = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
-    mVendorTagDescriptor->dump(fd, verbosity, indentation);
-
-    invokeReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor);
+    // Do not keep invokeReadWrite() APIs in while loop to avoid possible OOM.
+    if (mFDP->ConsumeBool()) {
+        invokeReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor);
+    } else {
+        invokeNewReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor, *mFDP);
+    }
     VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-    vdesc.clear();
-    close(fd);
 }
 
 void VendorTagDescriptorFuzzer::invokeVendorTagDescriptorCache() {
@@ -135,36 +161,52 @@
     uint64_t id = mFDP->ConsumeIntegral<uint64_t>();
     initVendorTagDescriptor();
 
-    mVendorTagDescriptorCache->addVendorDescriptor(id, mVendorTagDescriptor);
-    VendorTagDescriptorCache::setAsGlobalVendorTagCache(mVendorTagDescriptorCache);
-    VendorTagDescriptorCache::getGlobalVendorTagCache();
-    sp<VendorTagDescriptor> tagDesc;
-    mVendorTagDescriptorCache->getVendorTagDescriptor(id, &tagDesc);
-
-    int32_t tagCount = mVendorTagDescriptorCache->getTagCount(id);
-    if (tagCount > 0) {
-        uint32_t tagArray[tagCount];
-        mVendorTagDescriptorCache->getTagArray(tagArray, id);
-        uint32_t tag;
-        for (int32_t i = 0; i < tagCount; ++i) {
-            tag = tagArray[i];
-            get_local_camera_metadata_section_name_vendor_id(tag, id);
-            get_local_camera_metadata_tag_name_vendor_id(tag, id);
-            get_local_camera_metadata_tag_type_vendor_id(tag, id);
-        }
+    int8_t count = mFDP->ConsumeIntegralInRange<int8_t>(kMinLoopIterations, kMaxLoopIterations);
+    while (--count > 0) {
+        auto callVendorTagDescriptorCache = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() { mVendorTagDescriptorCache->addVendorDescriptor(id, mVendorTagDescriptor); },
+                [&]() {
+                    VendorTagDescriptorCache::setAsGlobalVendorTagCache(mVendorTagDescriptorCache);
+                },
+                [&]() { VendorTagDescriptorCache::getGlobalVendorTagCache(); },
+                [&]() {
+                    sp<VendorTagDescriptor> tagDesc;
+                    mVendorTagDescriptorCache->getVendorTagDescriptor(id, &tagDesc);
+                },
+                [&]() {
+                    int32_t tagCount = mVendorTagDescriptorCache->getTagCount(id);
+                    if (tagCount > 0) {
+                        uint32_t tagArray[tagCount];
+                        mVendorTagDescriptorCache->getTagArray(tagArray, id);
+                        uint32_t tag;
+                        for (int32_t i = 0; i < tagCount; ++i) {
+                            tag = tagArray[i];
+                            get_local_camera_metadata_section_name_vendor_id(tag, id);
+                            get_local_camera_metadata_tag_name_vendor_id(tag, id);
+                            get_local_camera_metadata_tag_type_vendor_id(tag, id);
+                        }
+                    }
+                },
+                [&]() {
+                    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+                    int32_t verbosity = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
+                    int32_t indentation = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
+                    mVendorTagDescriptorCache->dump(fd, verbosity, indentation);
+                    close(fd);
+                },
+                [&]() { VendorTagDescriptorCache::isVendorCachePresent(id); },
+                [&]() { mVendorTagDescriptorCache->getVendorIdsAndTagDescriptors(); },
+        });
+        callVendorTagDescriptorCache();
     }
 
-    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
-    int32_t verbosity = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
-    int32_t indentation = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
-    mVendorTagDescriptorCache->dump(fd, verbosity, indentation);
-
-    invokeReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache);
-    VendorTagDescriptorCache::isVendorCachePresent(id);
-    mVendorTagDescriptorCache->getVendorIdsAndTagDescriptors();
+    // Do not keep invokeReadWrite() APIs in while loop to avoid possible OOM.
+    if (mFDP->ConsumeBool()) {
+        invokeReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache);
+    } else {
+        invokeNewReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache, *mFDP);
+    }
     mVendorTagDescriptorCache->clearGlobalVendorTagCache();
-    tagDesc.clear();
-    close(fd);
 }
 
 void VendorTagDescriptorFuzzer::invokeVendorTagErrorConditions() {
@@ -177,26 +219,39 @@
         VendorTagDescriptor::createDescriptorFromOps(/*vOps*/ NULL, vDesc);
     } else {
         VendorTagDescriptor::createDescriptorFromOps(&vOps, vDesc);
-        int32_t tagCount = vDesc->getTagCount();
-        uint32_t badTag = mFDP->ConsumeIntegral<uint32_t>();
-        uint32_t badTagArray[tagCount + 1];
-        vDesc->getTagArray(badTagArray);
-        vDesc->getSectionName(badTag);
-        vDesc->getTagName(badTag);
-        vDesc->getTagType(badTag);
-        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-        VendorTagDescriptor::getGlobalVendorTagDescriptor();
-        VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc);
+
+        int8_t count = mFDP->ConsumeIntegralInRange<int8_t>(kMinLoopIterations, kMaxLoopIterations);
+        while (--count > 0) {
+            int32_t tagCount = vDesc->getTagCount();
+            uint32_t badTag = mFDP->ConsumeIntegral<uint32_t>();
+            uint32_t badTagArray[tagCount + 1];
+            auto callVendorTagErrorConditions =
+                    mFDP->PickValueInArray<const std::function<void()>>({
+                            [&]() { vDesc->getTagArray(badTagArray); },
+                            [&]() { vDesc->getSectionName(badTag); },
+                            [&]() { vDesc->getTagName(badTag); },
+                            [&]() { vDesc->getTagType(badTag); },
+                            [&]() { VendorTagDescriptor::clearGlobalVendorTagDescriptor(); },
+                            [&]() { VendorTagDescriptor::getGlobalVendorTagDescriptor(); },
+                            [&]() { VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc); },
+                    });
+            callVendorTagErrorConditions();
+        }
         invokeReadWriteNullParcelsp<VendorTagDescriptor>(vDesc);
-        vDesc.clear();
     }
+    vDesc.clear();
 }
 
 void VendorTagDescriptorFuzzer::process(const uint8_t* data, size_t size) {
     mFDP = new FuzzedDataProvider(data, size);
-    invokeVendorTagDescriptor();
-    invokeVendorTagDescriptorCache();
-    invokeVendorTagErrorConditions();
+    while (mFDP->remaining_bytes()) {
+        auto invokeVendorTagDescriptorFuzzer = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() { invokeVendorTagDescriptor(); },
+                [&]() { invokeVendorTagDescriptorCache(); },
+                [&]() { invokeVendorTagErrorConditions(); },
+        });
+        invokeVendorTagDescriptorFuzzer();
+    }
     delete mFDP;
 }
 
diff --git a/media/audio/aconfig/Android.bp b/media/audio/aconfig/Android.bp
index 1c1ac0e..39a1544 100644
--- a/media/audio/aconfig/Android.bp
+++ b/media/audio/aconfig/Android.bp
@@ -8,18 +8,21 @@
 aconfig_declarations {
     name: "com.android.media.audioserver-aconfig",
     package: "com.android.media.audioserver",
+    container: "system",
     srcs: ["audioserver.aconfig"],
 }
 
 aconfig_declarations {
     name: "com.android.media.audio-aconfig",
     package: "com.android.media.audio",
+    container: "system",
     srcs: ["audio.aconfig"],
 }
 
 aconfig_declarations {
     name: "com.android.media.aaudio-aconfig",
     package: "com.android.media.aaudio",
+    container: "system",
     srcs: ["aaudio.aconfig"],
 }
 
@@ -43,6 +46,18 @@
     name: "com.android.media.audio-aconfig-cc",
     aconfig_declarations: "com.android.media.audio-aconfig",
     defaults: ["audio-aconfig-cc-defaults"],
+    double_loadable: true,
+    host_supported: true,
+    product_available: true,
+    vendor_available: true,
+    // TODO(b/316909431) native_bridge_supported: true,
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+        "com.android.media.swcodec",
+        "com.android.btservices",
+    ],
+    min_sdk_version: "29",
 }
 
 cc_aconfig_library {
@@ -56,6 +71,12 @@
     aconfig_declarations: "com.android.media.audio-aconfig",
 }
 
+// For CTS usage
+java_aconfig_library {
+    name: "com.android.media.audioserver-aconfig-java",
+    aconfig_declarations: "com.android.media.audioserver-aconfig",
+}
+
 // Framework available flags to follow
 // Care must be taken to avoid namespace conflicts.
 // These flags are accessible outside of the platform! Limit usage to @FlaggedApi wherever possible
@@ -63,22 +84,25 @@
 aconfig_declarations {
     name: "android.media.audio-aconfig",
     package: "android.media.audio",
+    container: "system",
     srcs: ["audio_framework.aconfig"],
-    visibility: ["//visibility:private"],
+    visibility: ["//frameworks/base/api"],
 }
 
 aconfig_declarations {
     name: "android.media.audiopolicy-aconfig",
     package: "android.media.audiopolicy",
+    container: "system",
     srcs: ["audiopolicy_framework.aconfig"],
-    visibility: ["//visibility:private"],
+    visibility: ["//frameworks/base/api"],
 }
 
 aconfig_declarations {
     name: "android.media.midi-aconfig",
     package: "android.media.midi",
+    container: "system",
     srcs: ["midi_flags.aconfig"],
-    visibility: ["//visibility:private"],
+    visibility: ["//frameworks/base/api"],
 }
 
 java_aconfig_library {
@@ -91,6 +115,11 @@
     name: "android.media.audiopolicy-aconfig-java",
     aconfig_declarations: "android.media.audiopolicy-aconfig",
     defaults: ["framework-minus-apex-aconfig-java-defaults"],
+    min_sdk_version: "VanillaIceCream",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.car.framework",
+    ],
 }
 
 java_aconfig_library {
@@ -99,6 +128,12 @@
     defaults: ["framework-minus-apex-aconfig-java-defaults"],
 }
 
+cc_aconfig_library {
+    name: "android.media.audiopolicy-aconfig-cc",
+    aconfig_declarations: "android.media.audiopolicy-aconfig",
+    defaults: ["audio-aconfig-cc-defaults"],
+}
+
 filegroup {
     name: "audio-framework-aconfig",
     srcs: [
diff --git a/media/audio/aconfig/aaudio.aconfig b/media/audio/aconfig/aaudio.aconfig
index 7196525..c160109 100644
--- a/media/audio/aconfig/aaudio.aconfig
+++ b/media/audio/aconfig/aaudio.aconfig
@@ -3,6 +3,7 @@
 # Please add flags in alphabetical order.
 
 package: "com.android.media.aaudio"
+container: "system"
 
 flag {
     name: "sample_rate_conversion"
diff --git a/media/audio/aconfig/audio.aconfig b/media/audio/aconfig/audio.aconfig
index 73cb8ca..4d0df77 100644
--- a/media/audio/aconfig/audio.aconfig
+++ b/media/audio/aconfig/audio.aconfig
@@ -3,6 +3,7 @@
 # Please add flags in alphabetical order.
 
 package: "com.android.media.audio"
+container: "system"
 
 flag {
     name: "alarm_min_volume_zero"
@@ -37,8 +38,30 @@
 }
 
 flag {
+    name: "ringer_mode_affects_alarm"
+    namespace: "media_audio"
+    description:
+        "Support a configuration where ringer mode affects alarm stream"
+    bug: "312456558"
+}
+
+flag {
     name: "spatializer_offload"
     namespace: "media_audio"
     description: "Enable spatializer offload"
     bug: "307842941"
 }
+
+flag {
+    name: "stereo_spatialization"
+    namespace: "media_audio"
+    description: "Enable stereo channel mask for spatialization."
+    bug: "303920722"
+}
+
+flag {
+    name: "volume_refactoring"
+    namespace: "media_audio"
+    description: "Refactor the audio volume internal architecture logic"
+    bug: "324152869"
+}
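Code guarded by one of the flags declared above typically checks it through the
cc_aconfig_library generated for the package, the same way AudioSystem.cpp below uses
android.media.audiopolicy flags. A minimal sketch, assuming the generated header and namespace
follow the usual aconfig package-name mapping (dots replaced by underscores and nested
namespaces respectively):

#include <com_android_media_audio.h>

namespace audio_flags = com::android::media::audio;

bool shouldUseStereoSpatialization() {
    // Each flag declaration generates a bool accessor named after the flag.
    return audio_flags::stereo_spatialization();
}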
diff --git a/media/audio/aconfig/audio_framework.aconfig b/media/audio/aconfig/audio_framework.aconfig
index 294e67d..525dceb 100644
--- a/media/audio/aconfig/audio_framework.aconfig
+++ b/media/audio/aconfig/audio_framework.aconfig
@@ -21,6 +21,31 @@
     bug: "302323921"
 }
 
+flag {
+    name: "feature_spatial_audio_headtracking_low_latency"
+    namespace: "media_audio"
+    description: "Define feature for low latency headtracking for SA"
+    bug: "324291076"
+}
+
+flag {
+    name: "focus_exclusive_with_recording"
+    namespace: "media_audio"
+    description:
+        "Audio focus GAIN_TRANSIENT_EXCLUSIVE only mutes"
+        "notifications when the focus owner is also recording"
+    bug: "316414750"
+}
+
+flag {
+    name: "foreground_audio_control"
+    namespace: "media_audio"
+    description:
+        "Audio focus gain requires FGS or delegation to "
+        "take effect"
+    bug: "296232417"
+}
+
 # TODO remove
 flag {
     name: "focus_freeze_test_api"
@@ -42,5 +67,28 @@
 Enable the API for providing loudness metadata and CTA-2075 \
 support."
     bug: "298463873"
+    is_exported: true
 }
 
+flag {
+    name: "sco_managed_by_audio"
+    namespace: "media_audio"
+    description: "\
+Enable new implementation of headset profile device connection and \
+SCO audio activation."
+    bug: "265057196"
+}
+
+flag {
+    name: "supported_device_types_api"
+    namespace: "media_audio"
+    description: "Surface new API method AudioManager.getSupportedDeviceTypes()"
+    bug: "307537538"
+}
+
+flag {
+    name: "volume_ringer_api_hardening"
+    namespace: "media_audio"
+    description: "Limit access to volume and ringer SDK APIs in AudioManager"
+    bug: "296232417"
+}
diff --git a/media/audio/aconfig/audiopolicy_framework.aconfig b/media/audio/aconfig/audiopolicy_framework.aconfig
index 833730a..72a1e6c 100644
--- a/media/audio/aconfig/audiopolicy_framework.aconfig
+++ b/media/audio/aconfig/audiopolicy_framework.aconfig
@@ -4,6 +4,31 @@
 # Please add flags in alphabetical order.
 
 package: "android.media.audiopolicy"
+container: "system"
+
+flag {
+    name: "audio_mix_ownership"
+    namespace: "media_audio"
+    description: "Improves ownership model of AudioMixes and the relationship between AudioPolicy and AudioMix."
+    bug: "309080867"
+    is_fixed_read_only: true
+}
+
+flag {
+    name: "audio_mix_policy_ordering"
+    namespace: "media_audio"
+    description: "Orders AudioMixes per registered AudioPolicy."
+    bug: "309080867"
+    is_fixed_read_only: true
+}
+
+flag {
+    name: "audio_mix_test_api"
+    namespace: "media_audio"
+    description: "Enable new Test APIs that provide access to registered AudioMixes on system server and native side."
+    bug: "309080867"
+    is_fixed_read_only: true
+}
 
 flag {
     name: "audio_policy_update_mixing_rules_api"
@@ -11,3 +36,25 @@
     description: "Enable AudioPolicy.updateMixingRules API for hot-swapping audio mixing rules."
     bug: "293874525"
 }
+
+flag {
+    name: "enable_fade_manager_configuration"
+    namespace: "media_audio"
+    description: "Enable Fade Manager Configuration support to determine fade properties"
+    bug: "307354764"
+}
+
+flag {
+    name: "multi_zone_audio"
+    namespace: "media_audio"
+    description: "Enable multi-zone audio support in audio product strategies."
+    bug: "316643994"
+}
+
+flag {
+    name: "record_audio_device_aware_permission"
+    namespace: "media_audio"
+    description: "Enable device-aware permission handling for RECORD_AUDIO permission"
+    bug: "291737188"
+    is_fixed_read_only: true
+}
\ No newline at end of file
diff --git a/media/audio/aconfig/audioserver.aconfig b/media/audio/aconfig/audioserver.aconfig
index 21ea1a2..5c6504f 100644
--- a/media/audio/aconfig/audioserver.aconfig
+++ b/media/audio/aconfig/audioserver.aconfig
@@ -3,6 +3,7 @@
 # Please add flags in alphabetical order.
 
 package: "com.android.media.audioserver"
+container: "system"
 
 flag {
     name: "direct_track_reprioritization"
diff --git a/media/audio/aconfig/midi_flags.aconfig b/media/audio/aconfig/midi_flags.aconfig
index ff9238a..efb643f 100644
--- a/media/audio/aconfig/midi_flags.aconfig
+++ b/media/audio/aconfig/midi_flags.aconfig
@@ -4,6 +4,7 @@
 # Please add flags in alphabetical order.
 
 package: "android.media.midi"
+container: "system"
 
 flag {
     name: "virtual_ump"
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 2a33048..0384e2e 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -1049,7 +1049,7 @@
             memcpy(wView.data(), encoded_packet->data.frame.buf, encoded_packet->data.frame.sz);
             ++mNumInputFrames;
 
-            ALOGD("bytes generated %zu", encoded_packet->data.frame.sz);
+            ALOGV("bytes generated %zu", encoded_packet->data.frame.sz);
             uint32_t flags = 0;
             if (eos) {
                 flags |= C2FrameData::FLAG_END_OF_STREAM;
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index c3b32e6..9d9b574 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -2011,10 +2011,20 @@
         __INTRODUCED_IN(28);
 
 /**
- * Passes back the time at which a particular frame was presented.
+ * Returns the time at which a particular frame was played on a speaker or headset,
+ * or was recorded on a microphone.
+ *
  * This can be used to synchronize audio with video or MIDI.
  * It can also be used to align a recorded stream with a playback stream.
  *
+ * The framePosition is an index into the stream of audio data.
+ * The first frame played or recorded is at framePosition 0.
+ *
+ * These framePositions are the same units that you get from AAudioStream_getFramesRead()
+ * or AAudioStream_getFramesWritten().
+ * A "frame" is a set of audio sample values that are played simultaneously.
+ * For example, a stereo stream has two samples in a frame, left and right.
+ *
  * Timestamps are only valid when the stream is in {@link #AAUDIO_STREAM_STATE_STARTED}.
  * {@link #AAUDIO_ERROR_INVALID_STATE} will be returned if the stream is not started.
  * Note that because requestStart() is asynchronous, timestamps will not be valid until
@@ -2030,8 +2040,8 @@
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
- * @param framePosition pointer to a variable to receive the position
- * @param timeNanoseconds pointer to a variable to receive the time
+ * @param[out] framePosition pointer to a variable to receive the position
+ * @param[out] timeNanoseconds pointer to a variable to receive the time
  * @return {@link #AAUDIO_OK} or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* _Nonnull stream,
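A short usage sketch for the behavior documented above: correlate a reported frame position
with a wall-clock time to estimate when a given frame will be presented. The helper name is
hypothetical, error handling is reduced to a single check, and the stream is assumed to be an
open, started output stream.

#include <aaudio/AAudio.h>
#include <time.h>

// Returns the estimated CLOCK_MONOTONIC time (in nanoseconds) at which `frame`
// will be presented, or a negative AAudio error code. Sketch only.
static int64_t estimatePresentationTimeNanos(AAudioStream *stream, int64_t frame) {
    int64_t refFrame = 0;
    int64_t refTimeNanos = 0;
    aaudio_result_t result =
            AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC, &refFrame, &refTimeNanos);
    if (result != AAUDIO_OK) {
        return result;  // e.g. AAUDIO_ERROR_INVALID_STATE before the stream is started
    }
    const int64_t sampleRate = AAudioStream_getSampleRate(stream);
    // Frame position advances at the sample rate, so extrapolate from the reference pair.
    return refTimeNanos + ((frame - refFrame) * 1000000000LL) / sampleRate;
}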
diff --git a/media/libaaudio/include/aaudio/AAudioTesting.h b/media/libaaudio/include/aaudio/AAudioTesting.h
index 01d97b6..d67ec70 100644
--- a/media/libaaudio/include/aaudio/AAudioTesting.h
+++ b/media/libaaudio/include/aaudio/AAudioTesting.h
@@ -49,12 +49,6 @@
 };
 typedef int32_t aaudio_policy_t;
 
-// Internal error codes. Only used by the framework.
-enum {
-    AAUDIO_INTERNAL_ERROR_BASE = -1000,
-    AAUDIO_ERROR_STANDBY,
-};
-
 /**
  * Control whether AAudioStreamBuilder_openStream() will use the new MMAP data path
  * or the older "Legacy" data path.
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index e1d517e..0c55fca 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -72,6 +72,12 @@
      */
     int32_t pull(void *destination, int32_t targetFramesToRead);
 
+    // Reset the entire graph so that volume ramps start at their
+    // target value and sample rate converters start with no phase offset.
+    void reset() {
+        mSink->pullReset();
+    }
+
     /**
      * Set numFramesToWrite frames from the source into the flowgraph.
      * Then, attempt to read targetFramesToRead from the flowgraph.
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 52925d9..7648e25 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -199,6 +199,7 @@
         if (getSampleRate() != getDeviceSampleRate()) {
             ALOGD("%s - skipping sample rate converter. SR = %d, Device SR = %d", __func__,
                     getSampleRate(), getDeviceSampleRate());
+            result = AAUDIO_ERROR_INVALID_RATE;
             goto error;
         }
     }
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5bac2ca..5d4c3d4 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -100,6 +100,10 @@
 }
 
 void AudioStreamInternalPlay::prepareBuffersForStart() {
+    // Reset volume ramps to avoid a starting noise.
+    // This is called here, instead of in AudioStreamInternal, so that
+    // it is easier to backport.
+    mFlowGraph.reset();
     // Prevent stale data from being played.
     mAudioEndpoint->eraseDataMemory();
 }
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
index 6c22744..8af49b4 100644
--- a/media/libaaudio/src/core/AudioGlobal.h
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -22,6 +22,14 @@
 
 namespace aaudio {
 
+// Internal error codes. Only used by the framework.
+enum {
+    AAUDIO_INTERNAL_ERROR_BASE = -1000,
+    AAUDIO_ERROR_STANDBY,
+    AAUDIO_ERROR_ALREADY_CLOSED,
+
+};
+
 aaudio_policy_t AudioGlobal_getMMapPolicy();
 aaudio_result_t AudioGlobal_setMMapPolicy(aaudio_policy_t policy);
 
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 59fdabc..d729047 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -69,16 +69,24 @@
     audio_channel_mask_t channelMask =
             AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);
 
+    // Set flags based on selected parameters.
     audio_output_flags_t flags;
     aaudio_performance_mode_t perfMode = getPerformanceMode();
     switch(perfMode) {
-        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY: {
             // Bypass the normal mixer and go straight to the FAST mixer.
-            // If the app asks for a sessionId then it means they want to use effects.
-            // So don't use RAW flag.
-            flags = (audio_output_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
-                    ? (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)
-                    : (AUDIO_OUTPUT_FLAG_FAST));
+            // Some Usages need RAW mode so they can get the lowest possible latency.
+            // Other Usages should avoid RAW because it can interfere with
+            // dual sink routing or other features.
+            bool usageBenefitsFromRaw = getUsage() == AAUDIO_USAGE_GAME ||
+                    getUsage() == AAUDIO_USAGE_MEDIA;
+            // If an app does not ask for a sessionId then there will be no effects.
+            // So we can use the RAW flag.
+            flags = (audio_output_flags_t) (((requestedSessionId == AAUDIO_SESSION_ID_NONE)
+                                             && usageBenefitsFromRaw)
+                                            ? (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)
+                                            : (AUDIO_OUTPUT_FLAG_FAST));
+        }
             break;
 
         case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index d59afef..30efeb0 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -248,3 +248,30 @@
     srcs: ["test_idle_disconnected_shared_stream.cpp"],
     shared_libs: ["libaaudio"],
 }
+
+cc_test {
+    name: "test_multiple_close_simultaneously",
+    defaults: [
+        "latest_android_media_audio_common_types_cpp_shared",
+        "libaaudio_tests_defaults",
+    ],
+    srcs: ["test_multiple_close_simultaneously.cpp"],
+    shared_libs: [
+        "aaudio-aidl-cpp",
+        "framework-permission-aidl-cpp",
+        "libaaudio",
+        "libbinder",
+        "liblog",
+        "libutils",
+    ],
+    // This test runs for 1 minute to ensure that no crash occurs.
+    // Set the timeout to 2 minutes to allow the test to complete.
+    test_options: {
+        test_runner_options: [
+            {
+                name: "native-test-timeout",
+                value: "2m",
+            }
+        ],
+    },
+}
diff --git a/media/libaaudio/tests/test_multiple_close_simultaneously.cpp b/media/libaaudio/tests/test_multiple_close_simultaneously.cpp
new file mode 100644
index 0000000..f6351b6
--- /dev/null
+++ b/media/libaaudio/tests/test_multiple_close_simultaneously.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "test_multiple_close_simultaneously"
+
+#include <chrono>
+#include <condition_variable>
+#include <shared_mutex>
+#include <string>
+#include <thread>
+
+#include <gtest/gtest.h>
+
+#include <binder/IBinder.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/IAAudioService.h>
+#include <aaudio/StreamRequest.h>
+#include <aaudio/StreamParameters.h>
+
+using namespace android;
+using namespace aaudio;
+
+#define AAUDIO_SERVICE_NAME "media.aaudio"
+
+static constexpr int THREAD_NUM = 2;
+static constexpr auto TEST_DURATION = std::chrono::minutes(1);
+
+static std::string sError;
+static bool sTestPassed = true;
+
+struct Signal {
+    std::atomic_int value{0};
+    std::shared_mutex lock;
+    std::condition_variable_any cv;
+};
+
+class AAudioServiceDeathRecipient : public IBinder::DeathRecipient {
+public:
+    void binderDied(const wp<IBinder>& who __unused) override {
+        sError = "AAudioService is dead";
+        ALOGE("%s", sError.c_str());
+        sTestPassed = false;
+    }
+};
+
+sp<IAAudioService> getAAudioService(const sp<IBinder::DeathRecipient>& recipient) {
+    auto sm = defaultServiceManager();
+    if (sm == nullptr) {
+        sError = "Cannot get service manager";
+        ALOGE("%s", sError.c_str());
+        return nullptr;
+    }
+    sp<IBinder> binder = sm->waitForService(String16(AAUDIO_SERVICE_NAME));
+    if (binder == nullptr) {
+        sError = "Cannot get aaudio service";
+        ALOGE("%s", sError.c_str());
+        return nullptr;
+    }
+    if (binder->linkToDeath(recipient) != NO_ERROR) {
+        sError = "Cannot link to binder death";
+        ALOGE("%s", sError.c_str());
+        return nullptr;
+    }
+    return interface_cast<IAAudioService>(binder);
+}
+
+void openAndMultipleClose(const sp<IAAudioService>& aaudioService) {
+    auto start = std::chrono::system_clock::now();
+    bool hasFailedOpening = false;
+    while (sTestPassed && std::chrono::system_clock::now() - start < TEST_DURATION) {
+        StreamRequest inRequest;
+        StreamParameters outParams;
+        int32_t handle = 0;
+        inRequest.attributionSource.uid = getuid();
+        inRequest.attributionSource.pid = getpid();
+        inRequest.attributionSource.token = sp<BBinder>::make();
+        auto status = aaudioService->openStream(inRequest, &outParams, &handle);
+        if (!status.isOk()) {
+            sError = "Cannot open stream, it can be caused by service death";
+            ALOGE("%s", sError.c_str());
+            sTestPassed = false;
+            break;
+        }
+        if (handle <= 0) {
+            sError = "Cannot get stream handle after open, returned handle"
+                    + std::to_string(handle);
+            ALOGE("%s", sError.c_str());
+            sTestPassed = false;
+            break;
+        }
+        hasFailedOpening = false;
+
+        Signal isReady;
+        Signal startWork;
+        Signal isCompleted;
+        std::unique_lock readyLock(isReady.lock);
+        std::unique_lock completedLock(isCompleted.lock);
+        for (int i = 0; i < THREAD_NUM; ++i) {
+            std::thread closeStream([aaudioService, handle, &isReady, &startWork, &isCompleted] {
+                isReady.value++;
+                isReady.cv.notify_one();
+                {
+                    std::shared_lock<std::shared_mutex> _l(startWork.lock);
+                    startWork.cv.wait(_l, [&startWork] { return startWork.value.load() == 1; });
+                }
+                int32_t result;
+                aaudioService->closeStream(handle, &result);
+                isCompleted.value++;
+                isCompleted.cv.notify_one();
+            });
+            closeStream.detach();
+        }
+        isReady.cv.wait(readyLock, [&isReady] { return isReady.value == THREAD_NUM; });
+        {
+            std::unique_lock startWorkLock(startWork.lock);
+            startWork.value.store(1);
+        }
+        startWork.cv.notify_all();
+        isCompleted.cv.wait_for(completedLock,
+                                std::chrono::milliseconds(1000),
+                                [&isCompleted] { return isCompleted.value == THREAD_NUM; });
+        if (isCompleted.value != THREAD_NUM) {
+            sError = "Close is not completed within 1 second";
+            ALOGE("%s", sError.c_str());
+            sTestPassed = false;
+            break;
+        }
+    }
+}
+
+TEST(test_multiple_close_simultaneously, open_multiple_close) {
+    const auto recipient = sp<AAudioServiceDeathRecipient>::make();
+    auto aaudioService = getAAudioService(recipient);
+    ASSERT_NE(nullptr, aaudioService) << sError;
+    openAndMultipleClose(aaudioService);
+    ASSERT_TRUE(sTestPassed) << sError;
+}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 51a679b..369e917 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -67,6 +67,7 @@
         "libaudioclient_aidl_conversion",
         "libaudioutils",
         "libbinder",
+        "libbinder_ndk",
         "libcutils",
         "liblog",
         "libutils",
@@ -121,6 +122,7 @@
         "latest_android_media_audio_common_types_cpp_shared",
     ],
     shared_libs: [
+        "android.media.audiopolicy-aconfig-cc",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index d9fd58c..1417182 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -60,9 +60,13 @@
 }
 
 // Keep in sync with android/media/audiopolicy/AudioProductStrategy#attributeMatches
-int AudioProductStrategy::attributesMatchesScore(const audio_attributes_t refAttributes,
-                                                 const audio_attributes_t clientAttritubes)
+int AudioProductStrategy::attributesMatchesScore(audio_attributes_t refAttributes,
+                                                 audio_attributes_t clientAttritubes)
 {
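+    // Only flags that affect strategy selection are relevant when matching attributes;
+    // mask out the remaining flags before comparing.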
+    refAttributes.flags = static_cast<audio_flags_mask_t>(
+            refAttributes.flags & AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION);
+    clientAttritubes.flags = static_cast<audio_flags_mask_t>(
+            clientAttritubes.flags & AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION);
     if (refAttributes == clientAttritubes) {
         return MATCH_EQUALS;
     }
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 5bfdd5f..348e25f 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -22,6 +22,7 @@
 #include <android/media/IAudioPolicyService.h>
 #include <android/media/AudioMixUpdate.h>
 #include <android/media/BnCaptureStateListener.h>
+#include <android_media_audiopolicy.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
 #include <binder/IPCThreadState.h>
@@ -44,6 +45,8 @@
 
 // ----------------------------------------------------------------------------
 
+namespace audio_flags = android::media::audiopolicy;
+
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
@@ -1843,6 +1846,25 @@
     return statusTFromBinderStatus(aps->registerPolicyMixes(mixesAidl, registration));
 }
 
+status_t AudioSystem::getRegisteredPolicyMixes(std::vector<AudioMix>& mixes) {
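+    // Test-only query gated by the audio_mix_test_api aconfig flag; callers get
+    // INVALID_OPERATION while the flag is disabled.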
+    if (!audio_flags::audio_mix_test_api()) {
+        return INVALID_OPERATION;
+    }
+
+    const sp<IAudioPolicyService> aps = AudioSystem::get_audio_policy_service();
+    if (aps == nullptr) return PERMISSION_DENIED;
+
+    std::vector<::android::media::AudioMix> aidlMixes;
+    Status status = aps->getRegisteredPolicyMixes(&aidlMixes);
+
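+    // Convert each AIDL AudioMix back to the legacy representation expected by callers.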
+    for (const auto& aidlMix : aidlMixes) {
+        AudioMix mix = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioMix(aidlMix));
+        mixes.push_back(mix);
+    }
+
+    return statusTFromBinderStatus(status);
+}
+
 status_t AudioSystem::updatePolicyMixes(
         const std::vector<std::pair<AudioMix, std::vector<AudioMixMatchCriterion>>>&
                 mixesWithUpdates) {
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 1815293..565427b 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -2859,7 +2859,9 @@
 
     if (isOffloadedOrDirect_l() || mDoNotReconnect) {
         // FIXME re-creation of offloaded and direct tracks is not yet implemented;
-        // reconsider enabling for linear PCM encodings when position can be preserved.
+        // Disabled since (1) timestamp correction is not implemented for non-PCM and
+        // (2) existing direct tracks are pre-empted under resource constraints, so
+        // these tracks shouldn't reconnect.
         result = DEAD_OBJECT;
         return result;
     }
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 52c8da0..633493c 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -263,6 +263,8 @@
 
     void registerPolicyMixes(in AudioMix[] mixes, boolean registration);
 
+    List<AudioMix> getRegisteredPolicyMixes();
+
     void updatePolicyMixes(in AudioMixUpdate[] updates);
 
     void setUidDeviceAffinities(int /* uid_t */ uid, in AudioDevice[] devices);
diff --git a/media/libaudioclient/include/media/AudioProductStrategy.h b/media/libaudioclient/include/media/AudioProductStrategy.h
index fcbb019..2505b11 100644
--- a/media/libaudioclient/include/media/AudioProductStrategy.h
+++ b/media/libaudioclient/include/media/AudioProductStrategy.h
@@ -58,11 +58,11 @@
      * @return {@code INVALID_SCORE} if not matching, {@code MATCH_ON_DEFAULT_SCORE} if matching
      * to default strategy, non zero positive score if matching a strategy.
      */
-    static int attributesMatchesScore(const audio_attributes_t refAttributes,
-                                      const audio_attributes_t clientAttritubes);
+    static int attributesMatchesScore(audio_attributes_t refAttributes,
+                                      audio_attributes_t clientAttritubes);
 
-    static bool attributesMatches(const audio_attributes_t refAttributes,
-                                      const audio_attributes_t clientAttritubes) {
+    static bool attributesMatches(audio_attributes_t refAttributes,
+                                  audio_attributes_t clientAttritubes) {
         return attributesMatchesScore(refAttributes, clientAttritubes) > 0;
     }
 
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index a1f7941..acbcf3f 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -462,6 +462,8 @@
 
     static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
 
+    static status_t getRegisteredPolicyMixes(std::vector<AudioMix>& mixes);
+
     static status_t updatePolicyMixes(
         const std::vector<
                 std::pair<AudioMix, std::vector<AudioMixMatchCriterion>>>& mixesWithUpdates);
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index f72ac89..0da242d 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
index 01fc7fb..347afa6 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
@@ -26,6 +26,7 @@
 #include <aidl/android/hardware/audio/core/IModule.h>
 #include <aidl/android/media/audio/BnHalAdapterVendorExtension.h>
 #include <android/binder_manager.h>
+#include <cutils/properties.h>
 #include <media/AidlConversionNdkCpp.h>
 #include <media/AidlConversionUtil.h>
 #include <utils/Log.h>
@@ -121,8 +122,8 @@
     std::shared_ptr<IHalAdapterVendorExtension> getService(bool reset = false) {
         std::lock_guard l(mLock);
         if (reset || !mVendorExt.has_value()) {
-            auto serviceName = std::string(IHalAdapterVendorExtension::descriptor) + "/default";
-            if (AServiceManager_isDeclared(serviceName.c_str())) {
+            if (property_get_bool("ro.audio.ihaladaptervendorextension_enabled", false)) {
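+                // Bind only when the vendor opts in via the read-only system property;
+                // waitForService() then blocks until the /default instance is registered.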
+                auto serviceName = std::string(IHalAdapterVendorExtension::descriptor) + "/default";
                 mVendorExt = std::shared_ptr<IHalAdapterVendorExtension>(
                         IHalAdapterVendorExtension::fromBinder(ndk::SpAIBinder(
                                         AServiceManager_waitForService(serviceName.c_str()))));
diff --git a/media/libaudiohal/impl/EffectProxy.cpp b/media/libaudiohal/impl/EffectProxy.cpp
index d73a36c..d440ef8 100644
--- a/media/libaudiohal/impl/EffectProxy.cpp
+++ b/media/libaudiohal/impl/EffectProxy.cpp
@@ -156,6 +156,7 @@
 }
 
 ndk::ScopedAStatus EffectProxy::close() {
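+    // Send a STOP command before closing so the effect is not left in a processing state.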
+    command(CommandId::STOP);
     return runWithAllSubEffects([&](std::shared_ptr<IEffect>& effect) {
         return effect->close();
     });
diff --git a/media/libaudiohal/tests/Android.bp b/media/libaudiohal/tests/Android.bp
index 97510d6..fa12cc4 100644
--- a/media/libaudiohal/tests/Android.bp
+++ b/media/libaudiohal/tests/Android.bp
@@ -17,6 +17,7 @@
 // frameworks/av/include.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 085a7e4..ee4075f 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -32,6 +32,7 @@
 #include <private/media/VideoFrame.h>
 #include <utils/Log.h>
 #include <utils/RefBase.h>
+#include <algorithm>
 #include <vector>
 
 HeifDecoder* createHeifDecoder() {
@@ -42,7 +43,10 @@
 
 void initFrameInfo(HeifFrameInfo *info, const VideoFrame *videoFrame) {
     info->mWidth = videoFrame->mDisplayWidth;
-    info->mHeight = videoFrame->mDisplayHeight;
+    // Number of scanlines is mDisplayHeight. Clamp it to mHeight to guard
+    // against malformed streams claiming that mDisplayHeight is greater than
+    // mHeight.
+    info->mHeight = std::min(videoFrame->mDisplayHeight, videoFrame->mHeight);
     info->mRotationAngle = videoFrame->mRotationAngle;
     info->mBytesPerPixel = videoFrame->mBytesPerPixel;
     info->mDurationUs = videoFrame->mDurationUs;
@@ -746,7 +750,9 @@
                    (videoFrame->mRowBytes * (mCurScanline + videoFrame->mDisplayTop)) +
                    (videoFrame->mBytesPerPixel * videoFrame->mDisplayLeft);
     mCurScanline++;
-    memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mDisplayWidth);
+    // Do not try to copy more than |videoFrame->mWidth| pixels.
+    uint32_t width = std::min(videoFrame->mDisplayWidth, videoFrame->mWidth);
+    memcpy(dst, src, videoFrame->mBytesPerPixel * width);
     return true;
 }
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 89348a4..3ab32f0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -111,9 +111,12 @@
 // To collect the encoder usage for the battery app
 static void addBatteryData(uint32_t params) {
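+    // waitForService() blocks until media.player is registered instead of returning a
+    // null binder when the service is not yet available.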
     sp<IBinder> binder =
-        defaultServiceManager()->getService(String16("media.player"));
+        defaultServiceManager()->waitForService(String16("media.player"));
     sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
-    CHECK(service.get() != NULL);
+    if (service.get() == nullptr) {
+        ALOGE("%s: Failed to get media.player service", __func__);
+        return;
+    }
 
     service->addBatteryData(params);
 }
@@ -1453,29 +1456,44 @@
 }
 
 status_t StagefrightRecorder::setupAACRecording() {
-    // FIXME:
-    // Add support for OUTPUT_FORMAT_AAC_ADIF
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS);
+    // TODO(b/324512842): Add support for OUTPUT_FORMAT_AAC_ADIF
+    if (mOutputFormat != OUTPUT_FORMAT_AAC_ADTS) {
+        ALOGE("Invalid output format %d used for AAC recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
-    CHECK(mAudioEncoder == AUDIO_ENCODER_AAC ||
-          mAudioEncoder == AUDIO_ENCODER_HE_AAC ||
-          mAudioEncoder == AUDIO_ENCODER_AAC_ELD);
-    CHECK(mAudioSource != AUDIO_SOURCE_CNT);
+    if (mAudioEncoder != AUDIO_ENCODER_AAC
+            && mAudioEncoder != AUDIO_ENCODER_HE_AAC
+            && mAudioEncoder != AUDIO_ENCODER_AAC_ELD) {
+        ALOGE("Invalid encoder %d used for AAC recording", mAudioEncoder);
+        return BAD_VALUE;
+    }
+
+    if (mAudioSource == AUDIO_SOURCE_CNT) {
+        ALOGE("Audio source hasn't been set correctly");
+        return BAD_VALUE;
+    }
 
     mWriter = new AACWriter(mOutputFd);
     return setupRawAudioRecording();
 }
 
 status_t StagefrightRecorder::setupOggRecording() {
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_OGG);
+    if (mOutputFormat != OUTPUT_FORMAT_OGG) {
+        ALOGE("Invalid output format %d used for OGG recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     mWriter = new OggWriter(mOutputFd);
     return setupRawAudioRecording();
 }
 
 status_t StagefrightRecorder::setupAMRRecording() {
-    CHECK(mOutputFormat == OUTPUT_FORMAT_AMR_NB ||
-          mOutputFormat == OUTPUT_FORMAT_AMR_WB);
+    if (mOutputFormat != OUTPUT_FORMAT_AMR_NB
+            && mOutputFormat != OUTPUT_FORMAT_AMR_WB) {
+        ALOGE("Invalid output format %d used for AMR recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     if (mOutputFormat == OUTPUT_FORMAT_AMR_NB) {
         if (mAudioEncoder != AUDIO_ENCODER_DEFAULT &&
@@ -1528,7 +1546,10 @@
 }
 
 status_t StagefrightRecorder::setupRTPRecording() {
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_RTP_AVP);
+    if (mOutputFormat != OUTPUT_FORMAT_RTP_AVP) {
+        ALOGE("Invalid output format %d used for RTP recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     if ((mAudioSource != AUDIO_SOURCE_CNT
                 && mVideoSource != VIDEO_SOURCE_LIST_END)
@@ -1571,7 +1592,10 @@
 }
 
 status_t StagefrightRecorder::setupMPEG2TSRecording() {
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_MPEG2TS);
+    if (mOutputFormat != OUTPUT_FORMAT_MPEG2TS) {
+        ALOGE("Invalid output format %d used for MPEG2TS recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);
 
diff --git a/media/libmediaplayerservice/fuzzer/Android.bp b/media/libmediaplayerservice/fuzzer/Android.bp
index 507da29..74b0a85 100644
--- a/media/libmediaplayerservice/fuzzer/Android.bp
+++ b/media/libmediaplayerservice/fuzzer/Android.bp
@@ -110,6 +110,17 @@
         "libresourcemanagerservice",
         "libmediametricsservice",
         "mediametricsservice-aidl-cpp",
+        "libcameraservice",
+        "android.hardware.camera.common@1.0",
+        "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.provider@2.5",
+        "android.hardware.camera.provider@2.6",
+        "android.hardware.camera.provider@2.7",
+        "android.hardware.camera.provider-V3-ndk",
+        "android.hardware.camera.device@1.0",
+        "android.hardware.camera.device@3.2",
+        "android.hardware.camera.device@3.4",
+        "libaudiohal@7.0",
     ],
     header_libs: [
         "libaudiohal_headers",
diff --git a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
index fdac1a1..2518c21 100644
--- a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
+++ b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
@@ -15,22 +15,22 @@
  *
  */
 
-#include <media/stagefright/foundation/AString.h>
-#include "fuzzer/FuzzedDataProvider.h"
-
 #include <AudioFlinger.h>
 #include <MediaPlayerService.h>
 #include <ResourceManagerService.h>
-#include <fakeservicemanager/FakeServiceManager.h>
 #include <StagefrightRecorder.h>
 #include <camera/Camera.h>
 #include <camera/android/hardware/ICamera.h>
+#include <fakeservicemanager/FakeServiceManager.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
 #include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/foundation/AString.h>
 #include <mediametricsservice/MediaMetricsService.h>
 #include <thread>
+#include "CameraService.h"
+#include "fuzzer/FuzzedDataProvider.h"
 
 using namespace std;
 using namespace android;
@@ -46,32 +46,27 @@
     AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_SOURCE_VOICE_COMMUNICATION,
     AUDIO_SOURCE_REMOTE_SUBMIX,     AUDIO_SOURCE_UNPROCESSED,
     AUDIO_SOURCE_VOICE_PERFORMANCE, AUDIO_SOURCE_ECHO_REFERENCE,
-    AUDIO_SOURCE_FM_TUNER,          AUDIO_SOURCE_HOTWORD};
+    AUDIO_SOURCE_FM_TUNER,          AUDIO_SOURCE_HOTWORD,
+    AUDIO_SOURCE_ULTRASOUND};
+
+constexpr output_format kOutputFormat[] = {
+        OUTPUT_FORMAT_DEFAULT,        OUTPUT_FORMAT_THREE_GPP,
+        OUTPUT_FORMAT_MPEG_4,         OUTPUT_FORMAT_AUDIO_ONLY_START,
+        OUTPUT_FORMAT_RAW_AMR,        OUTPUT_FORMAT_AMR_NB,
+        OUTPUT_FORMAT_AMR_WB,         OUTPUT_FORMAT_AAC_ADTS,
+        OUTPUT_FORMAT_AUDIO_ONLY_END, OUTPUT_FORMAT_RTP_AVP,
+        OUTPUT_FORMAT_MPEG2TS,        OUTPUT_FORMAT_WEBM,
+        OUTPUT_FORMAT_HEIF,           OUTPUT_FORMAT_OGG,
+        OUTPUT_FORMAT_LIST_END};
+
+constexpr video_encoder kVideoEncoder[] = {
+        VIDEO_ENCODER_DEFAULT,      VIDEO_ENCODER_H263, VIDEO_ENCODER_H264,
+        VIDEO_ENCODER_MPEG_4_SP,    VIDEO_ENCODER_VP8,  VIDEO_ENCODER_HEVC,
+        VIDEO_ENCODER_DOLBY_VISION, VIDEO_ENCODER_AV1,  VIDEO_ENCODER_LIST_END};
 
 constexpr audio_microphone_direction_t kSupportedMicrophoneDirections[] = {
     MIC_DIRECTION_UNSPECIFIED, MIC_DIRECTION_FRONT, MIC_DIRECTION_BACK, MIC_DIRECTION_EXTERNAL};
 
-struct RecordingConfig {
-    output_format outputFormat;
-    audio_encoder audioEncoder;
-    video_encoder videoEncoder;
-};
-
-const struct RecordingConfig kRecordingConfigList[] = {
-    {OUTPUT_FORMAT_AMR_NB, AUDIO_ENCODER_AMR_NB, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AMR_WB, AUDIO_ENCODER_AMR_WB, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_HE_AAC, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC_ELD, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_OGG, AUDIO_ENCODER_OPUS, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_RTP_AVP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_MPEG2TS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
-    {OUTPUT_FORMAT_WEBM, AUDIO_ENCODER_VORBIS, VIDEO_ENCODER_VP8},
-    {OUTPUT_FORMAT_THREE_GPP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
-    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
-    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
-    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_HEVC}};
-
 const string kParametersList[] = {"max-duration",
                                   "max-filesize",
                                   "interleave-duration-us",
@@ -104,14 +99,16 @@
                                   "rtp-param-ext-cvo-degrees",
                                   "video-param-request-i-frame",
                                   "rtp-param-set-socket-dscp",
-                                  "rtp-param-set-socket-network"};
+                                  "rtp-param-set-socket-network",
+                                  "rtp-param-set-socket-ecn",
+                                  "rtp-param-remote-ip",
+                                  "log-session-id"};
 
-constexpr int32_t kMaxSleepTimeInMs = 100;
-constexpr int32_t kMinSleepTimeInMs = 0;
 constexpr int32_t kMinVideoSize = 2;
 constexpr int32_t kMaxVideoSize = 8192;
-constexpr int32_t kNumRecordMin = 1;
-constexpr int32_t kNumRecordMax = 10;
+const char kOutputFile[] = "OutputFile";
+const char kNextOutputFile[] = "NextOutputFile";
 
 class TestAudioDeviceCallback : public AudioSystem::AudioDeviceCallback {
    public:
@@ -194,8 +191,7 @@
     int32_t max;
     mStfRecorder->getMaxAmplitude(&max);
 
-    int32_t deviceId = mFdp.ConsumeIntegral<int32_t>();
-    mStfRecorder->setInputDevice(deviceId);
+    int32_t deviceId;
     mStfRecorder->getRoutedDeviceId(&deviceId);
 
     vector<android::media::MicrophoneInfoFw> activeMicrophones{};
@@ -213,101 +209,189 @@
     sp<IGraphicBufferProducer> buffer = mStfRecorder->querySurfaceMediaSource();
 }
 
-void MediaRecorderClientFuzzer::dumpInfo() {
-    int32_t dumpFd = memfd_create("DumpFile", MFD_ALLOW_SEALING);
-    Vector<String16> args;
-    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
-    mStfRecorder->dump(dumpFd, args);
-    close(dumpFd);
-}
-
-void MediaRecorderClientFuzzer::setConfig() {
-    mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
-    mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
-    mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
-    mStfRecorder->setPreferredMicrophoneDirection(
-        mFdp.PickValueInArray(kSupportedMicrophoneDirections));
-    mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool());
-    bool isPrivacySensitive;
-    mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
-    mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize),
-                               mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize));
-    mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>());
-    mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool());
-    mStfRecorder->setPreferredMicrophoneFieldDimension(mFdp.ConsumeFloatingPoint<float>());
-    mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
-
-    int32_t Idx = mFdp.ConsumeIntegralInRange<int32_t>(0, size(kRecordingConfigList) - 1);
-    mStfRecorder->setOutputFormat(kRecordingConfigList[Idx].outputFormat);
-    mStfRecorder->setAudioEncoder(kRecordingConfigList[Idx].audioEncoder);
-    mStfRecorder->setVideoEncoder(kRecordingConfigList[Idx].videoEncoder);
-
-    int32_t nextOutputFd = memfd_create("NextOutputFile", MFD_ALLOW_SEALING);
-    mStfRecorder->setNextOutputFile(nextOutputFd);
-    close(nextOutputFd);
-
-    for (Idx = 0; Idx < size(kParametersList); ++Idx) {
-        if (mFdp.ConsumeBool()) {
-            int32_t value = mFdp.ConsumeIntegral<int32_t>();
-            mStfRecorder->setParameters(
-                String8((kParametersList[Idx] + "=" + to_string(value)).c_str()));
-        }
+template <typename FuncWrapper>
+void callMediaAPI(FuncWrapper funcWrapper, FuzzedDataProvider* fdp) {
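+    // Invoke the wrapped recorder API only when the fuzzed input asks for it, so the
+    // fuzzer explores arbitrary subsets and orderings of configuration calls.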
+    if (fdp->ConsumeBool()) {
+        funcWrapper();
     }
 }
 
-MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t *data, size_t size)
-    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create("OutputFile", MFD_ALLOW_SEALING)) {
+void MediaRecorderClientFuzzer::setConfig() {
+    callMediaAPI(
+            [this]() {
+                mSurfaceControl = mComposerClient.createSurface(
+                        String8(mFdp.ConsumeRandomLengthString().c_str()) /* name */,
+                        mFdp.ConsumeIntegral<uint32_t>() /* width */,
+                        mFdp.ConsumeIntegral<uint32_t>() /* height */,
+                        mFdp.ConsumeIntegral<int32_t>() /* pixel-format */,
+                        mFdp.ConsumeIntegral<int32_t>() /* flags */);
+                if (mSurfaceControl) {
+                    mSurface = mSurfaceControl->getSurface();
+                    mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
+                }
+            },
+            &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->setInputDevice(mFdp.ConsumeIntegral<int32_t>()); },
+                 &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
+                mStfRecorder->setListener(listener);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<TestCamera> testCamera = sp<TestCamera>::make();
+                sp<Camera> camera = Camera::create(testCamera);
+                mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
+                mStfRecorder->setInputSurface(persistentSurface);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
+                mStfRecorder->setAudioDeviceCallback(callback);
+                mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setPreferredMicrophoneDirection(
+                        mFdp.PickValueInArray(kSupportedMicrophoneDirections));
+            },
+            &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool()); }, &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                bool isPrivacySensitive;
+                mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(
+                                                   kMinVideoSize, kMaxVideoSize) /* width */,
+                                           mFdp.ConsumeIntegralInRange<int32_t>(
+                                                   kMinVideoSize, kMaxVideoSize) /* height */);
+            },
+            &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>()); },
+                 &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool()); }, &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setPreferredMicrophoneFieldDimension(
+                        mFdp.ConsumeFloatingPoint<float>());
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
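+                // Pick an output format first, then constrain the audio/video encoder
+                // choices to combinations the recorder accepts for that format.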
+                output_format OutputFormat = mFdp.PickValueInArray(kOutputFormat);
+                audio_encoder AudioEncoderFormat =
+                        (audio_encoder)mFdp.ConsumeIntegralInRange<int32_t>(AUDIO_ENCODER_DEFAULT,
+                                                                            AUDIO_ENCODER_LIST_END);
+                video_encoder VideoEncoderFormat = mFdp.PickValueInArray(kVideoEncoder);
+                if (OutputFormat == OUTPUT_FORMAT_AMR_NB) {
+                    AudioEncoderFormat =
+                            mFdp.ConsumeBool() ? AUDIO_ENCODER_DEFAULT : AUDIO_ENCODER_AMR_NB;
+                } else if (OutputFormat == OUTPUT_FORMAT_AMR_WB) {
+                    AudioEncoderFormat = AUDIO_ENCODER_AMR_WB;
+                } else if (OutputFormat == OUTPUT_FORMAT_AAC_ADIF ||
+                           OutputFormat == OUTPUT_FORMAT_AAC_ADTS ||
+                           OutputFormat == OUTPUT_FORMAT_MPEG2TS) {
+                    AudioEncoderFormat = (audio_encoder)mFdp.ConsumeIntegralInRange<int32_t>(
+                            AUDIO_ENCODER_AAC, AUDIO_ENCODER_AAC_ELD);
+                    if (OutputFormat == OUTPUT_FORMAT_MPEG2TS) {
+                        VideoEncoderFormat = VIDEO_ENCODER_H264;
+                    }
+                }
+                mStfRecorder->setOutputFormat(OutputFormat);
+                mStfRecorder->setAudioEncoder(AudioEncoderFormat);
+                mStfRecorder->setVideoEncoder(VideoEncoderFormat);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                int32_t nextOutputFd = memfd_create(kNextOutputFile, MFD_ALLOW_SEALING);
+                mStfRecorder->setNextOutputFile(nextOutputFd);
+                close(nextOutputFd);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                for (int32_t idx = 0; idx < size(kParametersList); ++idx) {
+                    if (mFdp.ConsumeBool()) {
+                        int32_t value = mFdp.ConsumeIntegral<int32_t>();
+                        mStfRecorder->setParameters(
+                                String8((kParametersList[idx] + "=" + to_string(value)).c_str()));
+                    }
+                }
+            },
+            &mFdp);
+}
+
+MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t* data, size_t size)
+    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create(kOutputFile, MFD_ALLOW_SEALING)) {
     AttributionSourceState attributionSource;
     attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
     attributionSource.token = sp<BBinder>::make();
     mStfRecorder = make_unique<StagefrightRecorder>(attributionSource);
-
-    mSurfaceControl = mComposerClient.createSurface(
-        String8(mFdp.ConsumeRandomLengthString().c_str()), mFdp.ConsumeIntegral<uint32_t>(),
-        mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<int32_t>(),
-        mFdp.ConsumeIntegral<int32_t>());
-    if (mSurfaceControl) {
-        mSurface = mSurfaceControl->getSurface();
-        mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
-    }
-
-    sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
-    mStfRecorder->setListener(listener);
-
-    sp<TestCamera> testCamera = sp<TestCamera>::make();
-    sp<Camera> camera = Camera::create(testCamera);
-    mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
-
-    sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
-    mStfRecorder->setInputSurface(persistentSurface);
-
-    sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
-    mStfRecorder->setAudioDeviceCallback(callback);
 }
 
 void MediaRecorderClientFuzzer::process() {
-    setConfig();
-
     mStfRecorder->init();
     mStfRecorder->prepare();
-    size_t numRecord = mFdp.ConsumeIntegralInRange<size_t>(kNumRecordMin, kNumRecordMax);
-    for (size_t Idx = 0; Idx < numRecord; ++Idx) {
-        mStfRecorder->start();
-        this_thread::sleep_for(chrono::milliseconds(
-            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
-        mStfRecorder->pause();
-        this_thread::sleep_for(chrono::milliseconds(
-            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
-        mStfRecorder->resume();
-        this_thread::sleep_for(chrono::milliseconds(
-            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
-        mStfRecorder->stop();
+    while (mFdp.remaining_bytes()) {
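+        // Drive the recorder through a fuzzer-chosen sequence of lifecycle and
+        // configuration calls until the input is exhausted.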
+        auto invokeMediaPlayerApi = mFdp.PickValueInArray<const std::function<void()>>({
+                [&]() { setConfig(); },
+                [&]() { mStfRecorder->start(); },
+                [&]() { mStfRecorder->pause(); },
+                [&]() { mStfRecorder->resume(); },
+                [&]() { mStfRecorder->stop(); },
+                [&]() { getConfig(); },
+                [&]() { mStfRecorder->close(); },
+                [&]() { mStfRecorder->reset(); },
+        });
+        invokeMediaPlayerApi();
     }
-    dumpInfo();
-    getConfig();
-
-    mStfRecorder->close();
-    mStfRecorder->reset();
 }
 
 extern "C" int LLVMFuzzerInitialize(int /* *argc */, char /* ***argv */) {
@@ -320,6 +404,7 @@
     MediaPlayerService::instantiate();
     AudioFlinger::instantiate();
     ResourceManagerService::instantiate();
+    CameraService::instantiate();
     fakeServiceManager->addService(String16(MediaMetricsService::kServiceName),
                                     new MediaMetricsService());
     return 0;
diff --git a/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp b/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp
index e8fea73..fb9f1e9 100644
--- a/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp
+++ b/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp
@@ -310,26 +310,31 @@
 }
 
 // Check if the input is valid by checking if it contains a sync word
-static bool isInputValid(uint8 *buf, uint32 inSize)
+static ERROR_CODE validate_input(uint8 *buf, uint32 inSize)
 {
-    // Buffer needs to contain at least 4 bytes which is the size of
-    // the header
-    if (inSize < 4) return false;
+    /*
+     * Verify that at least the header is complete.
+     * Note that SYNC_WORD_LNGTH is in units of bits, while inSize is in bytes.
+     */
+    if (inSize < ((SYNC_WORD_LNGTH + 21) >> 3))
+    {
+        return NO_ENOUGH_MAIN_DATA_ERROR;
+    }
 
     size_t totalInSize = 0;
     size_t frameSize = 0;
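+    // Walk the buffer frame by frame: each frame must begin with a parseable header
+    // and fit entirely within the input buffer.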
     while (totalInSize <= (inSize - 4)) {
         if (!parseHeader(U32_AT(buf + totalInSize), &frameSize)) {
-            return false;
+            return SYNCH_LOST_ERROR;
         }
         // Buffer needs to be large enough to include complete frame
         if ((frameSize > inSize) || (totalInSize > (inSize - frameSize))) {
-            return false;
+            return SYNCH_LOST_ERROR;
         }
         totalInSize += frameSize;
     }
 
-    return true;
+    return NO_DECODING_ERROR;
 }
 
 ERROR_CODE pvmp3_framedecoder(tPVMP3DecoderExternal *pExt,
@@ -348,10 +353,11 @@
     mp3Header info_data;
     mp3Header *info = &info_data;
 
-    if (!isInputValid(pExt->pInputBuffer, pExt->inputBufferCurrentLength))
+    errorCode = validate_input(pExt->pInputBuffer, pExt->inputBufferCurrentLength);
+    if (errorCode != NO_DECODING_ERROR)
     {
         pExt->outputFrameSize = 0;
-        return SYNCH_LOST_ERROR;
+        return errorCode;
     }
 
     pVars->inputStream.pBuffer  = pExt->pInputBuffer;
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 2946398..c4f2808 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -27,7 +27,6 @@
 #include <media/AidlConversionUtil.h>
 #include <android/content/AttributionSourceState.h>
 
-#include <com_android_media_audio.h>
 #include <iterator>
 #include <algorithm>
 #include <pwd.h>
@@ -388,10 +387,6 @@
  */
 bool mustAnonymizeBluetoothAddress(
         const AttributionSourceState& attributionSource, const String16& caller) {
-    if (!com::android::media::audio::bluetooth_mac_address_anonymization()) {
-        return false;
-    }
-
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
     if (isAudioServerOrSystemServerUid(uid)) {
         return false;
diff --git a/services/Android.mk b/services/Android.mk
deleted file mode 100644
index c86a226..0000000
--- a/services/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-$(eval $(call declare-1p-copy-files,frameworks/av/services/audiopolicy,))
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index afd28e5..129541f 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -145,6 +145,7 @@
         "audioflinger-aidl-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
+        "com.android.media.audio-aconfig-cc",
         "effect-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libactivitymanager_aidl",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0425284..fcf02d9 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -311,7 +311,8 @@
     }
 
     mPatchPanel = IAfPatchPanel::create(sp<IAfPatchPanelCallback>::fromExisting(this));
-    mMelReporter = sp<MelReporter>::make(sp<IAfMelReporterCallback>::fromExisting(this));
+    mMelReporter = sp<MelReporter>::make(sp<IAfMelReporterCallback>::fromExisting(this),
+                                         mPatchPanel);
 }
 
 status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
@@ -4144,7 +4145,7 @@
         }
 
         // Only audio policy service can create a spatializer effect
-        if ((memcmp(&descOut.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0) &&
+        if (IAfEffectModule::isSpatializer(&descOut.type) &&
             (callingUid != AID_AUDIOSERVER || currentPid != getpid())) {
             ALOGW("%s: attempt to create a spatializer effect from uid/pid %d/%d",
                     __func__, callingUid, currentPid);
@@ -4482,7 +4483,7 @@
             if (effect->state() == IAfEffectModule::ACTIVE ||
                     effect->state() == IAfEffectModule::STOPPING) {
                 ++started;
-                effect->start();
+                effect->start_l();
             }
         }
         dstChain->mutex().unlock();
@@ -4585,7 +4586,7 @@
         // removeEffect_l() has stopped the effect if it was active so it must be restarted
         if (effect->state() == IAfEffectModule::ACTIVE ||
             effect->state() == IAfEffectModule::STOPPING) {
-            effect->start();
+            effect->start_l();
         }
     }
 
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index 201d147..feae97e 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -143,7 +143,7 @@
         if (lStatus == NO_ERROR) {
             lStatus = effect->addHandle(handle.get());
             if (lStatus == NO_ERROR) {
-                lStatus = effect->init(patches);
+                lStatus = effect->init_l(patches);
                 if (lStatus == NAME_NOT_FOUND) {
                     lStatus = NO_ERROR;
                 }
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index 7045c8b..287d838 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -139,7 +139,7 @@
     // check if effects should be suspended or restored when a given effect is enable or disabled
     void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect __unused,
                           bool enabled __unused, bool threadLocked __unused) final {}
-    void resetVolume() final {}
+    void resetVolume_l() final REQUIRES(audio_utils::EffectChain_Mutex) {}
     product_strategy_t strategy() const final { return static_cast<product_strategy_t>(0); }
     int32_t activeTrackCnt() const final { return 0; }
     void onEffectEnable(const sp<IAfEffectBase>& effect __unused) final {}
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 73a89e5..a02657e 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -570,10 +570,10 @@
     : EffectBase(callback, desc, id, sessionId, pinned),
       // clear mConfig to ensure consistent initial value of buffer framecount
       // in case buffers are associated by setInBuffer() or setOutBuffer()
-      // prior to configure().
+      // prior to configure_l().
       mConfig{{}, {}},
       mStatus(NO_INIT),
-      mMaxDisableWaitCnt(1), // set by configure(), should be >= 1
+      mMaxDisableWaitCnt(1), // set by configure_l(), should be >= 1
       mDisableWaitCnt(0),    // set by process() and updateState()
       mOffloaded(false),
       mIsOutput(false)
@@ -588,13 +588,13 @@
     if (mStatus != NO_ERROR) {
         return;
     }
-    lStatus = init();
+    lStatus = init_l();
     if (lStatus < 0) {
         mStatus = lStatus;
         goto Error;
     }
 
-    setOffloaded(callback->isOffload(), callback->io());
+    setOffloaded_l(callback->isOffload(), callback->io());
     ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
 
     return;
@@ -616,7 +616,7 @@
 
 }
 
-bool EffectModule::updateState() {
+bool EffectModule::updateState_l() {
     audio_utils::lock_guard _l(mutex());
 
     bool started = false;
@@ -632,7 +632,7 @@
                    0,
                    mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
         }
-        if (start_l() == NO_ERROR) {
+        if (start_ll() == NO_ERROR) {
             mState = ACTIVE;
             started = true;
         } else {
@@ -641,8 +641,8 @@
         break;
     case STOPPING:
         // volume control for offload and direct threads must take effect immediately.
-        if (stop_l() == NO_ERROR
-            && !(isVolumeControl() && isOffloadedOrDirect())) {
+        if (stop_ll() == NO_ERROR
+            && !(isVolumeControl() && isOffloadedOrDirect_l())) {
             mDisableWaitCnt = mMaxDisableWaitCnt;
         } else {
             mDisableWaitCnt = 1; // will cause immediate transition to IDLE
@@ -836,9 +836,9 @@
     mEffectInterface->command(EFFECT_CMD_RESET, 0, NULL, &replySize, &reply);
 }
 
-status_t EffectModule::configure()
+status_t EffectModule::configure_l()
 {
-    ALOGVV("configure() started");
+    ALOGVV("%s started", __func__);
     status_t status;
     uint32_t size;
     audio_channel_mask_t channelMask;
@@ -879,7 +879,7 @@
     mConfig.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
 
     // Don't use sample rate for thread if effect isn't offloadable.
-    if (callback->isOffloadOrDirect() && !isOffloaded()) {
+    if (callback->isOffloadOrDirect() && !isOffloaded_l()) {
         mConfig.inputCfg.samplingRate = DEFAULT_OUTPUT_SAMPLE_RATE;
         ALOGV("Overriding effect input as 48kHz");
     } else {
@@ -909,9 +909,9 @@
     mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
     mIsOutput = callback->isOutput();
 
-    ALOGV("configure() %p chain %p buffer %p framecount %zu",
-          this, callback->chain().promote().get(),
-          mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
+    ALOGV("%s %p chain %p buffer %p framecount %zu", __func__, this,
+          callback->chain().promote().get(), mConfig.inputCfg.buffer.raw,
+          mConfig.inputCfg.buffer.frameCount);
 
     status_t cmdStatus;
     size = sizeof(int);
@@ -1012,11 +1012,11 @@
 exit:
     // TODO: consider clearing mConfig on error.
     mStatus = status;
-    ALOGVV("configure ended");
+    ALOGVV("%s ended", __func__);
     return status;
 }
 
-status_t EffectModule::init()
+status_t EffectModule::init_l()
 {
     audio_utils::lock_guard _l(mutex());
     if (mEffectInterface == 0) {
@@ -1048,21 +1048,21 @@
     }
 }
 
-// start() must be called with PlaybackThread::mutex() or EffectChain::mutex() held
-status_t EffectModule::start()
+// start_l() must be called with EffectChain::mutex() held
+status_t EffectModule::start_l()
 {
     status_t status;
     {
         audio_utils::lock_guard _l(mutex());
-        status = start_l();
+        status = start_ll();
     }
     if (status == NO_ERROR) {
-        getCallback()->resetVolume();
+        getCallback()->resetVolume_l();
     }
     return status;
 }
 
-status_t EffectModule::start_l()
+status_t EffectModule::start_ll()
 {
     if (mEffectInterface == 0) {
         return NO_INIT;
@@ -1086,13 +1086,13 @@
     return status;
 }
 
-status_t EffectModule::stop()
+status_t EffectModule::stop_l()
 {
     audio_utils::lock_guard _l(mutex());
-    return stop_l();
+    return stop_ll();
 }
 
-status_t EffectModule::stop_l()
+status_t EffectModule::stop_ll()
 {
     if (mEffectInterface == 0) {
         return NO_INIT;
@@ -1103,11 +1103,11 @@
     status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
 
-    if (isVolumeControl() && isOffloadedOrDirect()) {
+    if (isVolumeControl() && isOffloadedOrDirect_l()) {
         // We have the EffectChain and EffectModule lock, permit a reentrant call to setVolume:
         // resetVolume_l --> setVolume_l --> EffectModule::setVolume
         mSetVolumeReentrantTid = gettid();
-        getCallback()->resetVolume();
+        getCallback()->resetVolume_l();
         mSetVolumeReentrantTid = INVALID_PID;
     }
 
@@ -1162,7 +1162,7 @@
                      std::vector<uint8_t>* reply)
 {
     audio_utils::lock_guard _l(mutex());
-    ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface.get());
+    ALOGVV("%s, cmdCode: %d, mEffectInterface: %p", __func__, cmdCode, mEffectInterface.get());
 
     if (mState == DESTROYED || mEffectInterface == 0) {
         return NO_INIT;
@@ -1258,20 +1258,20 @@
     }
 }
 
-bool EffectModule::isOffloadedOrDirect() const
+bool EffectModule::isOffloadedOrDirect_l() const
 {
     return getCallback()->isOffloadOrDirect();
 }
 
-bool EffectModule::isVolumeControlEnabled() const
+bool EffectModule::isVolumeControlEnabled_l() const
 {
-    return (isVolumeControl() && (isOffloadedOrDirect() ? isEnabled() : isProcessEnabled()));
+    return (isVolumeControl() && (isOffloadedOrDirect_l() ? isEnabled() : isProcessEnabled()));
 }
 
 void EffectModule::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
     ALOGVV("setInBuffer %p",(&buffer));
 
-    // mConfig.inputCfg.buffer.frameCount may be zero if configure() is not called yet.
+    // mConfig.inputCfg.buffer.frameCount may be zero if configure_l() is not called yet.
     if (buffer != 0) {
         mConfig.inputCfg.buffer.raw = buffer->audioBuffer()->raw;
         buffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
@@ -1317,7 +1317,7 @@
 void EffectModule::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
     ALOGVV("setOutBuffer %p",(&buffer));
 
-    // mConfig.outputCfg.buffer.frameCount may be zero if configure() is not called yet.
+    // mConfig.outputCfg.buffer.frameCount may be zero if configure_l() is not called yet.
     if (buffer != 0) {
         mConfig.outputCfg.buffer.raw = buffer->audioBuffer()->raw;
         buffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
@@ -1356,8 +1356,7 @@
     }
 }
 
-status_t EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
-{
+status_t EffectModule::setVolume(uint32_t* left, uint32_t* right, bool controller) {
     AutoLockReentrant _l(mutex(), mSetVolumeReentrantTid);
     if (mStatus != NO_ERROR) {
         return mStatus;
@@ -1480,7 +1479,7 @@
     return status;
 }
 
-status_t EffectModule::setOffloaded(bool offloaded, audio_io_handle_t io)
+status_t EffectModule::setOffloaded_l(bool offloaded, audio_io_handle_t io)
 {
     audio_utils::lock_guard _l(mutex());
     if (mStatus != NO_ERROR) {
@@ -1509,11 +1508,11 @@
         }
         mOffloaded = false;
     }
-    ALOGV("setOffloaded() offloaded %d io %d status %d", offloaded, io, status);
+    ALOGV("%s offloaded %d io %d status %d", __func__, offloaded, io, status);
     return status;
 }
 
-bool EffectModule::isOffloaded() const
+bool EffectModule::isOffloaded_l() const
 {
     audio_utils::lock_guard _l(mutex());
     return mOffloaded;
@@ -1528,8 +1527,16 @@
     return IAfEffectModule::isHapticGenerator(&mDescriptor.type);
 }
 
-status_t EffectModule::setHapticIntensity(int id, os::HapticScale intensity)
-{
+/*static*/
+bool IAfEffectModule::isSpatializer(const effect_uuid_t *type) {
+    return memcmp(type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0;
+}
+
+bool EffectModule::isSpatializer() const {
+    return IAfEffectModule::isSpatializer(&mDescriptor.type);
+}
+
+status_t EffectModule::setHapticIntensity_l(int id, os::HapticScale intensity) {
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
@@ -1554,8 +1561,7 @@
     return status;
 }
 
-status_t EffectModule::setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo)
-{
+status_t EffectModule::setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo) {
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
@@ -1584,8 +1590,8 @@
     return status;
 }
 
-status_t EffectModule::getConfigs(
-        audio_config_base_t* inputCfg, audio_config_base_t* outputCfg, bool* isOutput) const {
+status_t EffectModule::getConfigs_l(audio_config_base_t* inputCfg, audio_config_base_t* outputCfg,
+                                    bool* isOutput) const {
     audio_utils::lock_guard _l(mutex());
     if (mConfig.inputCfg.mask == 0 || mConfig.outputCfg.mask == 0) {
         return NO_INIT;
@@ -1600,6 +1606,35 @@
     return NO_ERROR;
 }
 
+status_t EffectModule::sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata) {
+    if (mStatus != NO_ERROR) {
+        return mStatus;
+    }
+    // TODO b/307368176: send all metadata to effects if requested by the implementation.
+    // For now only send channel mask to Spatializer.
+    if (!isSpatializer()) {
+        return INVALID_OPERATION;
+    }
+
+    std::vector<uint8_t> request(
+            sizeof(effect_param_t) + sizeof(int32_t) + metadata.size() * sizeof(uint32_t));
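+    // Request layout: effect_param_t header, one int32_t parameter id
+    // (SPATIALIZER_PARAM_INPUT_CHANNEL_MASK), then one uint32_t channel mask per
+    // metadata entry.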
+    effect_param_t *param = (effect_param_t*) request.data();
+    param->psize = sizeof(int32_t);
+    param->vsize = metadata.size() * sizeof(uint32_t);
+    *(int32_t*)param->data = SPATIALIZER_PARAM_INPUT_CHANNEL_MASK;
+    uint32_t* channelMasks = reinterpret_cast<uint32_t*>(param->data + sizeof(int32_t));
+    for (auto m : metadata) {
+        *channelMasks++ = m.channel_mask;
+    }
+    std::vector<uint8_t> response;
+    status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
+    if (status == NO_ERROR) {
+        LOG_ALWAYS_FATAL_IF(response.size() != sizeof(status_t));
+        status = *reinterpret_cast<const status_t*>(response.data());
+    }
+    return status;
+}
+
 static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
     std::stringstream ss;
 
@@ -1910,7 +1945,7 @@
     audio_config_base_t inputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
     audio_config_base_t outputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
     bool isOutput;
-    status_t status = effectModule->getConfigs(&inputCfg, &outputCfg, &isOutput);
+    status_t status = effectModule->getConfigs_l(&inputCfg, &outputCfg, &isOutput);
     if (status == NO_ERROR) {
         constexpr bool isInput = false; // effects always use 'OUT' channel masks.
         _config->inputCfg = VALUE_OR_RETURN_STATUS_AS_OUT(
@@ -2176,7 +2211,7 @@
     return 0;
 }
 
-std::vector<int> EffectChain::getEffectIds() const
+std::vector<int> EffectChain::getEffectIds_l() const
 {
     std::vector<int> ids;
     audio_utils::lock_guard _l(mutex());
@@ -2206,8 +2241,7 @@
 }
 
 // Must be called with EffectChain::mutex() locked
-void EffectChain::process_l()
-{
+void EffectChain::process_l() {
     // never process effects when:
     // - on an OFFLOAD thread
     // - no more tracks are on the session and the effect tail has been rendered
@@ -2250,7 +2284,7 @@
     }
     bool doResetVolume = false;
     for (size_t i = 0; i < size; i++) {
-        doResetVolume = mEffects[i]->updateState() || doResetVolume;
+        doResetVolume = mEffects[i]->updateState_l() || doResetVolume;
     }
     if (doResetVolume) {
         resetVolume_l();
@@ -2304,14 +2338,14 @@
                 numSamples * sizeof(float), &halBuffer);
         if (result != OK) return result;
 
-        effect->configure();
+        effect->configure_l();
 
         effect->setInBuffer(halBuffer);
         // auxiliary effects output samples to chain input buffer for further processing
         // by insert effects
         effect->setOutBuffer(mInBuffer);
     } else {
-        ssize_t idx_insert = getInsertIndex(desc);
+        ssize_t idx_insert = getInsertIndex_ll(desc);
         if (idx_insert < 0) {
             return INVALID_OPERATION;
         }
@@ -2319,7 +2353,7 @@
         size_t previousSize = mEffects.size();
         mEffects.insertAt(effect, idx_insert);
 
-        effect->configure();
+        effect->configure_l();
 
         // - By default:
         //   All effects read samples from chain input buffer.
@@ -2334,9 +2368,9 @@
             effect->setOutBuffer(mOutBuffer);
             if (idx_insert == 0) {
                 if (previousSize != 0) {
-                    mEffects[1]->configure();
+                    mEffects[1]->configure_l();
                     mEffects[1]->setInBuffer(mOutBuffer);
-                    mEffects[1]->updateAccessMode();      // reconfig if neeeded.
+                    mEffects[1]->updateAccessMode_l();  // reconfig if needed.
                 }
                 effect->setInBuffer(mInBuffer);
             } else {
@@ -2346,9 +2380,9 @@
             effect->setInBuffer(mInBuffer);
             if (idx_insert == static_cast<ssize_t>(previousSize)) {
                 if (idx_insert != 0) {
-                    mEffects[idx_insert-1]->configure();
+                    mEffects[idx_insert-1]->configure_l();
                     mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
-                    mEffects[idx_insert - 1]->updateAccessMode();      // reconfig if neeeded.
+                    mEffects[idx_insert - 1]->updateAccessMode_l();  // reconfig if needed.
                 }
                 effect->setOutBuffer(mOutBuffer);
             } else {
@@ -2358,21 +2392,21 @@
         ALOGV("%s effect %p, added in chain %p at rank %zu",
                 __func__, effect.get(), this, idx_insert);
     }
-    effect->configure();
+    effect->configure_l();
 
     return NO_ERROR;
 }
 
 std::optional<size_t> EffectChain::findVolumeControl_l(size_t from, size_t to) const {
     for (size_t i = std::min(to, mEffects.size()); i > from; i--) {
-        if (mEffects[i - 1]->isVolumeControlEnabled()) {
+        if (mEffects[i - 1]->isVolumeControlEnabled_l()) {
             return i - 1;
         }
     }
     return std::nullopt;
 }
 
-ssize_t EffectChain::getInsertIndex(const effect_descriptor_t& desc) {
+ssize_t EffectChain::getInsertIndex_ll(const effect_descriptor_t& desc) {
     // Insert effects are inserted at the end of mEffects vector as they are processed
     //  after track and auxiliary effects.
     // Insert effect order as a function of indicated preference:
@@ -2387,7 +2421,7 @@
     // already present
     // Spatializer or Downmixer effects are inserted in first position because
     // they adapt the channel count for all other effects in the chain
-    if ((memcmp(&desc.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0)
+    if (IAfEffectModule::isSpatializer(&desc.type)
             || (memcmp(&desc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0)) {
         return 0;
     }
@@ -2460,7 +2494,7 @@
             // the middle of a read from audio HAL
             if (mEffects[i]->state() == EffectModule::ACTIVE ||
                     mEffects[i]->state() == EffectModule::STOPPING) {
-                mEffects[i]->stop();
+                mEffects[i]->stop_l();
             }
             if (release) {
                 mEffects[i]->release_l();
@@ -2468,9 +2502,9 @@
 
             if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
                 if (i == size - 1 && i != 0) {
-                    mEffects[i - 1]->configure();
+                    mEffects[i - 1]->configure_l();
                     mEffects[i - 1]->setOutBuffer(mOutBuffer);
-                    mEffects[i - 1]->updateAccessMode();      // reconfig if neeeded.
+                    mEffects[i - 1]->updateAccessMode_l();      // reconfig if needed.
                 }
             }
             mEffects.removeAt(i);
@@ -2479,9 +2513,9 @@
             // is updated if needed (can switch from HAL channel mask to mixer channel mask)
             if (type != EFFECT_FLAG_TYPE_AUXILIARY // TODO(b/284522658) breaks for aux FX, why?
                     && i == 0 && size > 1) {
-                mEffects[0]->configure();
+                mEffects[0]->configure_l();
                 mEffects[0]->setInBuffer(mInBuffer);
-                mEffects[0]->updateAccessMode();      // reconfig if neeeded.
+                mEffects[0]->updateAccessMode_l();      // reconfig if needed.
             }
 
             ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
@@ -2531,14 +2565,19 @@
 
 bool EffectChain::hasVolumeControlEnabled_l() const {
     for (const auto &effect : mEffects) {
-        if (effect->isVolumeControlEnabled()) return true;
+        if (effect->isVolumeControlEnabled_l()) return true;
     }
     return false;
 }
 
-// setVolume_l() must be called with IAfThreadBase::mutex() or EffectChain::mutex() held
-bool EffectChain::setVolume_l(uint32_t *left, uint32_t *right, bool force)
-{
+// setVolume() must be called without EffectChain::mutex()
+bool EffectChain::setVolume(uint32_t* left, uint32_t* right, bool force) {
+    audio_utils::lock_guard _l(mutex());
+    return setVolume_l(left, right, force);
+}
+
+// setVolume_l() must be called with EffectChain::mutex() held
+bool EffectChain::setVolume_l(uint32_t* left, uint32_t* right, bool force) {
     uint32_t newLeft = *left;
     uint32_t newRight = *right;
     const size_t size = mEffects.size();
@@ -2613,7 +2652,7 @@
     return volumeControlIndex.has_value();
 }
 
-// resetVolume_l() must be called with IAfThreadBase::mutex() or EffectChain::mutex() held
+// resetVolume_l() must be called with EffectChain::mutex() held
 void EffectChain::resetVolume_l()
 {
     if ((mLeftVolume != UINT_MAX) && (mRightVolume != UINT_MAX)) {
@@ -2639,11 +2678,11 @@
 {
     audio_utils::lock_guard _l(mutex());
     for (size_t i = 0; i < mEffects.size(); ++i) {
-        mEffects[i]->setHapticIntensity(id, intensity);
+        mEffects[i]->setHapticIntensity_l(id, intensity);
     }
 }
 
-void EffectChain::syncHalEffectsState()
+void EffectChain::syncHalEffectsState_l()
 {
     audio_utils::lock_guard _l(mutex());
     for (size_t i = 0; i < mEffects.size(); i++) {
@@ -2712,7 +2751,7 @@
         }
 
         if (desc->mRefCount++ == 0) {
-            sp<IAfEffectModule> effect = getEffectIfEnabled(type);
+            sp<IAfEffectModule> effect = getEffectIfEnabled_l(type);
             if (effect != 0) {
                 desc->mEffect = effect;
                 effect->setSuspended(true);
@@ -2765,7 +2804,7 @@
         }
         if (desc->mRefCount++ == 0) {
             Vector< sp<IAfEffectModule> > effects;
-            getSuspendEligibleEffects(effects);
+            getSuspendEligibleEffects_l(effects);
             for (size_t i = 0; i < effects.size(); i++) {
                 setEffectSuspended_l(&effects[i]->desc().type, true);
             }
@@ -2806,8 +2845,7 @@
 #endif //OPENSL_ES_H_
 
 /* static */
-bool EffectChain::isEffectEligibleForBtNrecSuspend(const effect_uuid_t *type)
-{
+bool EffectChain::isEffectEligibleForBtNrecSuspend_l(const effect_uuid_t* type) {
     // Only NS and AEC are suspended when BtNRec is off
     if ((memcmp(type, FX_IID_AEC, sizeof(effect_uuid_t)) == 0) ||
         (memcmp(type, FX_IID_NS, sizeof(effect_uuid_t)) == 0)) {
@@ -2816,7 +2854,7 @@
     return false;
 }
 
-bool EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc)
+bool EffectChain::isEffectEligibleForSuspend_l(const effect_descriptor_t& desc)
 {
     // auxiliary effects and visualizer are never suspended on output mix
     if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
@@ -2829,26 +2867,24 @@
     return true;
 }
 
-void EffectChain::getSuspendEligibleEffects(
+void EffectChain::getSuspendEligibleEffects_l(
         Vector< sp<IAfEffectModule> > &effects)
 {
     effects.clear();
     for (size_t i = 0; i < mEffects.size(); i++) {
-        if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
+        if (isEffectEligibleForSuspend_l(mEffects[i]->desc())) {
             effects.add(mEffects[i]);
         }
     }
 }
 
-sp<IAfEffectModule> EffectChain::getEffectIfEnabled(const effect_uuid_t *type)
+sp<IAfEffectModule> EffectChain::getEffectIfEnabled_l(const effect_uuid_t *type)
 {
     sp<IAfEffectModule> effect = getEffectFromType_l(type);
     return effect != 0 && effect->isEnabled() ? effect : 0;
 }
 
-void EffectChain::checkSuspendOnEffectEnabled(const sp<IAfEffectModule>& effect,
-                                                            bool enabled)
-{
+void EffectChain::checkSuspendOnEffectEnabled_l(const sp<IAfEffectModule>& effect, bool enabled) {
     ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
     if (enabled) {
         if (index < 0) {
@@ -2857,18 +2893,17 @@
             if (index < 0) {
                 return;
             }
-            if (!isEffectEligibleForSuspend(effect->desc())) {
+            if (!isEffectEligibleForSuspend_l(effect->desc())) {
                 return;
             }
             setEffectSuspended_l(&effect->desc().type, enabled);
             index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
             if (index < 0) {
-                ALOGW("checkSuspendOnEffectEnabled() Fx should be suspended here!");
+                ALOGW("%s Fx should be suspended here!", __func__);
                 return;
             }
         }
-        ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x",
-            effect->desc().type.timeLow);
+        ALOGV("%s enable suspending fx %08x", __func__, effect->desc().type.timeLow);
         sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
         // if effect is requested to suspended but was not yet enabled, suspend it now.
         if (desc->mEffect == 0) {
@@ -2880,8 +2915,7 @@
         if (index < 0) {
             return;
         }
-        ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x",
-            effect->desc().type.timeLow);
+        ALOGV("%s disable restoring fx %08x", __func__, effect->desc().type.timeLow);
         sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
         desc->mEffect.clear();
         effect->setSuspended(false);
@@ -2983,6 +3017,20 @@
     return true;
 }
 
+// sendMetadata_l() must be called with thread->mutex() held
+void EffectChain::sendMetadata_l(const std::vector<playback_track_metadata_v7_t>& allMetadata,
+        const std::optional<const std::vector<playback_track_metadata_v7_t>> spatializedMetadata) {
+    audio_utils::lock_guard _l(mutex());
+    for (const auto& effect : mEffects) {
+        if (spatializedMetadata.has_value()
+                && IAfEffectModule::isSpatializer(&effect->desc().type)) {
+            effect->sendMetadata_ll(spatializedMetadata.value());
+        } else {
+            effect->sendMetadata_ll(allMetadata);
+        }
+    }
+}
+
 // EffectCallbackInterface implementation
 status_t EffectChain::EffectCallback::createEffectHal(
         const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t deviceId,
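The new sendMetadata_l() also illustrates the suffix convention used throughout this change: a single _l means the caller must already hold one named mutex (here IAfThreadBase::mutex()), while _ll, as in EffectModule::sendMetadata_ll(), start_ll() and stop_ll(), means two. The chain holds the thread mutex by contract, takes its own mutex, and only then calls into each module, so both capabilities are held at the _ll call site. A compressed sketch of that shape with hypothetical member names:

    // Sketch of the _l/_ll convention; doWork_l() and doWork_ll() are hypothetical.
    void doWork_l() REQUIRES(audio_utils::ThreadBase_Mutex) {  // first lock held by the caller
        audio_utils::lock_guard _l(mutex());                   // second lock taken here
        for (const auto& effect : mEffects) {
            effect->doWork_ll();  // REQUIRES(ThreadBase_Mutex, EffectChain_Mutex)
        }
    }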
@@ -3196,8 +3244,9 @@
     t->setVolumeForOutput_l(left, right);
 }
 
-void EffectChain::EffectCallback::checkSuspendOnEffectEnabled(
-        const sp<IAfEffectBase>& effect, bool enabled, bool threadLocked) {
+void EffectChain::EffectCallback::checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect,
+                                                              bool enabled, bool threadLocked)
+        NO_THREAD_SAFETY_ANALYSIS {
     const sp<IAfThreadBase> t = thread().promote();
     if (t == nullptr) {
         return;
@@ -3209,7 +3258,7 @@
         return;
     }
     // in EffectChain context, an EffectBase is always from an EffectModule so static cast is safe
-    c->checkSuspendOnEffectEnabled(effect->asEffectModule(), enabled);
+    c->checkSuspendOnEffectEnabled_l(effect->asEffectModule(), enabled);
 }
 
 void EffectChain::EffectCallback::onEffectEnable(const sp<IAfEffectBase>& effect) {
@@ -3241,7 +3290,7 @@
     return true;
 }
 
-void EffectChain::EffectCallback::resetVolume() {
+void EffectChain::EffectCallback::resetVolume_l() {
     sp<IAfEffectChain> c = chain().promote();
     if (c == nullptr) {
         return;
@@ -3302,7 +3351,7 @@
     return status;
 }
 
-status_t DeviceEffectProxy::init(
+status_t DeviceEffectProxy::init_l(
         const std::map <audio_patch_handle_t, IAfPatchPanel::Patch>& patches) {
 //For all audio patches
 //If src or sink device match
@@ -3406,7 +3455,7 @@
             } else {
                 mHalEffect->setDevices({mDevice});
             }
-            mHalEffect->configure();
+            mHalEffect->configure_l();
         }
         *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
                                    mNotifyFramesProcessed);
@@ -3695,7 +3744,7 @@
     if (effect == nullptr) {
         return;
     }
-    effect->start();
+    effect->start_l();
 }
 
 void DeviceEffectProxy::ProxyCallback::onEffectDisable(
@@ -3704,7 +3753,7 @@
     if (effect == nullptr) {
         return;
     }
-    effect->stop();
+    effect->stop_l();
 }
 
 } // namespace android
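DeviceEffectProxy::init() becomes init_l(), and the header change below documents that DeviceEffectManager_Mutex must be held by the caller. A hedged sketch of the assumed manager-side call site; the helper name onPatchesAvailable_l is hypothetical:

    void onPatchesAvailable_l(const sp<IAfDeviceEffectProxy>& proxy,
            const std::map<audio_patch_handle_t, IAfPatchPanel::Patch>& patches)
            REQUIRES(audio_utils::DeviceEffectManager_Mutex) {
        const status_t status = proxy->init_l(patches);  // manager mutex already held here
        ALOGW_IF(status != NO_ERROR, "device effect proxy init_l() failed: %d", status);
    }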
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 9208c88..6e34e9b 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -78,11 +78,11 @@
                         { return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
                             == EFFECT_FLAG_VOLUME_MONITOR; }
 
-    status_t setEnabled(bool enabled, bool fromHandle) override;
-    status_t setEnabled_l(bool enabled) final;
+    status_t setEnabled(bool enabled, bool fromHandle) override EXCLUDES_EffectBase_Mutex;
+    status_t setEnabled_l(bool enabled) final REQUIRES(audio_utils::EffectBase_Mutex);
     bool isEnabled() const final;
-    void setSuspended(bool suspended) final;
-    bool suspended() const final;
+    void setSuspended(bool suspended) final EXCLUDES_EffectBase_Mutex;
+    bool suspended() const final EXCLUDES_EffectBase_Mutex;
 
     status_t command(int32_t __unused,
                              const std::vector<uint8_t>& __unused,
@@ -99,36 +99,40 @@
         return mCallback.load();
     }
 
-    status_t addHandle(IAfEffectHandle *handle) final;
-    ssize_t disconnectHandle(IAfEffectHandle *handle, bool unpinIfLast) final;
-    ssize_t removeHandle(IAfEffectHandle *handle) final;
-    ssize_t removeHandle_l(IAfEffectHandle *handle) final;
-    IAfEffectHandle* controlHandle_l() final;
-    bool purgeHandles() final;
+    status_t addHandle(IAfEffectHandle* handle) final EXCLUDES_EffectBase_Mutex;
+    ssize_t disconnectHandle(IAfEffectHandle* handle,
+                             bool unpinIfLast) final EXCLUDES_EffectBase_Mutex;
+    ssize_t removeHandle(IAfEffectHandle* handle) final EXCLUDES_EffectBase_Mutex;
+    ssize_t removeHandle_l(IAfEffectHandle* handle) final REQUIRES(audio_utils::EffectBase_Mutex);
+    IAfEffectHandle* controlHandle_l() final REQUIRES(audio_utils::EffectBase_Mutex);
+    bool purgeHandles() final EXCLUDES_EffectBase_Mutex;
 
-    void             checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) final;
+    void checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) final;
 
-    bool             isPinned() const final { return mPinned; }
-    void             unPin() final { mPinned = false; }
+    bool isPinned() const final { return mPinned; }
+    void unPin() final { mPinned = false; }
 
-    audio_utils::mutex& mutex() const final { return mMutex; }
+    audio_utils::mutex& mutex() const final
+            RETURN_CAPABILITY(android::audio_utils::EffectBase_Mutex) {
+        return mMutex;
+    }
 
-    status_t         updatePolicyState() final;
+    status_t updatePolicyState() final EXCLUDES_EffectBase_Mutex;
 
     sp<IAfEffectModule> asEffectModule() override { return nullptr; }
     sp<IAfDeviceEffectProxy> asDeviceEffectProxy() override { return nullptr; }
 
-    void             dump(int fd, const Vector<String16>& args) const override;
+    void dump(int fd, const Vector<String16>& args) const override;
 
 protected:
-    bool             isInternal_l() const {
-                         for (auto handle : mHandles) {
-                            if (handle->client() != nullptr) {
-                                return false;
-                            }
-                         }
-                         return true;
-                     }
+    bool isInternal_l() const REQUIRES(audio_utils::EffectBase_Mutex) {
+        for (auto handle : mHandles) {
+            if (handle->client() != nullptr) {
+                return false;
+            }
+        }
+        return true;
+    }
 
     bool             mPinned = false;
 
@@ -150,7 +154,10 @@
     // Audio policy effect state management
     // Mutex protecting transactions with audio policy manager as mutex() cannot
     // be held to avoid cross deadlocks with audio policy mutex
-    audio_utils::mutex& policyMutex() const { return mPolicyMutex; }
+    audio_utils::mutex& policyMutex() const
+            RETURN_CAPABILITY(android::audio_utils::EffectBase_PolicyMutex) {
+        return mPolicyMutex;
+    }
     mutable audio_utils::mutex mPolicyMutex{audio_utils::MutexOrder::kEffectBase_PolicyMutex};
     // Effect is registered in APM or not
     bool                      mPolicyRegistered = false;
@@ -175,25 +182,23 @@
                     int id,
                     audio_session_t sessionId,
                     bool pinned,
-                    audio_port_handle_t deviceId);
-    ~EffectModule() override;
+                    audio_port_handle_t deviceId) REQUIRES(audio_utils::EffectChain_Mutex);
+    ~EffectModule() override REQUIRES(audio_utils::EffectChain_Mutex);
 
-    void process() final;
-    bool updateState() final;
-    status_t command(int32_t cmdCode,
-                     const std::vector<uint8_t>& cmdData,
-                     int32_t maxReplySize,
-                     std::vector<uint8_t>* reply) final;
+    void process() final EXCLUDES_EffectBase_Mutex;
+    bool updateState_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t command(int32_t cmdCode, const std::vector<uint8_t>& cmdData, int32_t maxReplySize,
+                     std::vector<uint8_t>* reply) final EXCLUDES_EffectBase_Mutex;
 
-    void reset_l() final;
-    status_t configure() final;
-    status_t init() final;
+    void reset_l() final REQUIRES(audio_utils::EffectBase_Mutex);
+    status_t configure_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    status_t init_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
     uint32_t status() const final {
         return mStatus;
     }
     bool isProcessEnabled() const final;
-    bool isOffloadedOrDirect() const final;
-    bool isVolumeControlEnabled() const final;
+    bool isOffloadedOrDirect_l() const final REQUIRES(audio_utils::EffectChain_Mutex);
+    bool isVolumeControlEnabled_l() const final REQUIRES(audio_utils::EffectChain_Mutex);
     void setInBuffer(const sp<EffectBufferHalInterface>& buffer) final;
     int16_t *inBuffer() const final {
         return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
@@ -203,34 +208,42 @@
         return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
     }
     // Updates the access mode if it is out of date.  May issue a new effect configure.
-    void updateAccessMode() final {
-                    if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
-                        configure();
-                    }
-                }
-    status_t setDevices(const AudioDeviceTypeAddrVector &devices) final;
-    status_t setInputDevice(const AudioDeviceTypeAddr &device) final;
+    void updateAccessMode_l() final REQUIRES(audio_utils::EffectChain_Mutex) {
+        if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
+            configure_l();
+        }
+    }
+    status_t setDevices(const AudioDeviceTypeAddrVector& devices) final EXCLUDES_EffectBase_Mutex;
+    status_t setInputDevice(const AudioDeviceTypeAddr& device) final EXCLUDES_EffectBase_Mutex;
     status_t setVolume(uint32_t *left, uint32_t *right, bool controller) final;
-    status_t setMode(audio_mode_t mode) final;
-    status_t setAudioSource(audio_source_t source) final;
-    status_t start() final;
-    status_t stop() final;
+    status_t setMode(audio_mode_t mode) final EXCLUDES_EffectBase_Mutex;
+    status_t setAudioSource(audio_source_t source) final EXCLUDES_EffectBase_Mutex;
+    status_t start_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t stop_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
 
-    status_t setOffloaded(bool offloaded, audio_io_handle_t io) final;
-    bool isOffloaded() const final;
-    void addEffectToHal_l() final;
-    void release_l() final;
+    status_t setOffloaded_l(bool offloaded, audio_io_handle_t io) final
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    bool isOffloaded_l() const final
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    void addEffectToHal_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    void release_l() final REQUIRES(audio_utils::EffectChain_Mutex);
 
     sp<IAfEffectModule> asEffectModule() final { return this; }
 
     bool isHapticGenerator() const final;
+    bool isSpatializer() const final;
 
-    status_t setHapticIntensity(int id, os::HapticScale intensity) final;
-    status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo) final;
+    status_t setHapticIntensity_l(int id, os::HapticScale intensity) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata) final
+            REQUIRES(audio_utils::ThreadBase_Mutex,
+                     audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
 
-    status_t getConfigs(audio_config_base_t* inputCfg,
-                                audio_config_base_t* outputCfg,
-                                bool* isOutput) const final;
+    status_t getConfigs_l(audio_config_base_t* inputCfg, audio_config_base_t* outputCfg,
+                          bool* isOutput) const final
+            REQUIRES(audio_utils::EffectHandle_Mutex) EXCLUDES_EffectBase_Mutex;
 
     void dump(int fd, const Vector<String16>& args) const final;
 
@@ -241,9 +254,9 @@
 
     DISALLOW_COPY_AND_ASSIGN(EffectModule);
 
-    status_t start_l();
-    status_t stop_l();
-    status_t removeEffectFromHal_l();
+    status_t start_ll() REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
+    status_t stop_ll() REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
+    status_t removeEffectFromHal_l() REQUIRES(audio_utils::EffectChain_Mutex);
     status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
     effect_buffer_access_e requiredEffectBufferAccessMode() const {
         return mConfig.inputCfg.buffer.raw == mConfig.outputCfg.buffer.raw
@@ -366,7 +379,9 @@
 private:
     DISALLOW_COPY_AND_ASSIGN(EffectHandle);
 
-    audio_utils::mutex& mutex() const { return mMutex; }
+    audio_utils::mutex& mutex() const RETURN_CAPABILITY(android::audio_utils::EffectHandle_Mutex) {
+        return mMutex;
+    }
     // protects IEffect method calls
     mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kEffectHandle_Mutex};
     const wp<IAfEffectBase> mEffect;               // pointer to controlled EffectModule
@@ -399,34 +414,43 @@
 public:
     EffectChain(const sp<IAfThreadBase>& thread, audio_session_t sessionId);
 
-    void process_l() final;
+    void process_l() final REQUIRES(audio_utils::EffectChain_Mutex);
 
-    audio_utils::mutex& mutex() const final { return mMutex; }
+    audio_utils::mutex& mutex() const final RETURN_CAPABILITY(audio_utils::EffectChain_Mutex) {
+        return mMutex;
+    }
 
-    status_t createEffect_l(sp<IAfEffectModule>& effect,
-                            effect_descriptor_t *desc,
-                            int id,
-                            audio_session_t sessionId,
-                            bool pinned) final;
-    status_t addEffect_l(const sp<IAfEffectModule>& handle) final;
-    status_t addEffect_ll(const sp<IAfEffectModule>& handle) final;
-    size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) final;
+    status_t createEffect_l(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
+                            audio_session_t sessionId, bool pinned) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+    status_t addEffect_l(const sp<IAfEffectModule>& handle) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+    status_t addEffect_ll(const sp<IAfEffectModule>& handle) final
+            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
+    size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
     audio_session_t sessionId() const final { return mSessionId; }
     void setSessionId(audio_session_t sessionId) final { mSessionId = sessionId; }
 
-    sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor) const final;
-    sp<IAfEffectModule> getEffectFromId_l(int id) const final;
-    sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t *type) const final;
-    std::vector<int> getEffectIds() const final;
+    sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t* descriptor) const final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    sp<IAfEffectModule> getEffectFromId_l(int id) const final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t* type) const final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    std::vector<int> getEffectIds_l() const final REQUIRES(audio_utils::ThreadBase_Mutex);
     // FIXME use float to improve the dynamic range
 
-    bool setVolume_l(uint32_t *left, uint32_t *right, bool force = false) final;
-    void resetVolume_l() final;
-    void setDevices_l(const AudioDeviceTypeAddrVector &devices) final;
-    void setInputDevice_l(const AudioDeviceTypeAddr &device) final;
-    void setMode_l(audio_mode_t mode) final;
-    void setAudioSource_l(audio_source_t source) final;
+    bool setVolume(uint32_t* left, uint32_t* right,
+                   bool force = false) final EXCLUDES_EffectChain_Mutex;
+    void resetVolume_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    void setDevices_l(const AudioDeviceTypeAddrVector& devices) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    void setInputDevice_l(const AudioDeviceTypeAddr& device) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    void setMode_l(audio_mode_t mode) final REQUIRES(audio_utils::ThreadBase_Mutex);
+    void setAudioSource_l(audio_source_t source) final REQUIRES(audio_utils::ThreadBase_Mutex);
 
     void setInBuffer(const sp<EffectBufferHalInterface>& buffer) final {
         mInBuffer = buffer;
@@ -457,21 +481,22 @@
 
     // suspend or restore effects of the specified type. The number of suspend requests is counted
     // and restore occurs once all suspend requests are cancelled.
-    void setEffectSuspended_l(const effect_uuid_t *type,
-                              bool suspend) final;
+    void setEffectSuspended_l(const effect_uuid_t* type, bool suspend) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
     // suspend all eligible effects
-    void setEffectSuspendedAll_l(bool suspend) final;
+    void setEffectSuspendedAll_l(bool suspend) final REQUIRES(audio_utils::ThreadBase_Mutex);
     // check if effects should be suspended or restored when a given effect is enable or disabled
-    void checkSuspendOnEffectEnabled(
-            const sp<IAfEffectModule>& effect, bool enabled) final;
+    void checkSuspendOnEffectEnabled_l(const sp<IAfEffectModule>& effect, bool enabled) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
-    void clearInputBuffer() final;
+    void clearInputBuffer() final EXCLUDES_EffectChain_Mutex;
 
     // At least one non offloadable effect in the chain is enabled
-    bool isNonOffloadableEnabled() const final;
-    bool isNonOffloadableEnabled_l() const final;
+    bool isNonOffloadableEnabled() const final EXCLUDES_EffectChain_Mutex;
+    bool isNonOffloadableEnabled_l() const final REQUIRES(audio_utils::EffectChain_Mutex);
 
-    void syncHalEffectsState() final;
+    void syncHalEffectsState_l()
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex final;
 
     // flags is an ORed set of audio_output_flags_t which is updated on return.
     void checkOutputFlagCompatibility(audio_output_flags_t *flags) const final;
@@ -490,12 +515,13 @@
 
     // isCompatibleWithThread_l() must be called with thread->mutex() held
     bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
     // Requires either IAfThreadBase::mutex() or EffectChain::mutex() held
     bool containsHapticGeneratingEffect_l() final;
 
-    void setHapticIntensity_l(int id, os::HapticScale intensity) final;
+    void setHapticIntensity_l(int id, os::HapticScale intensity) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
     sp<EffectCallbackInterface> effectCallback() const final { return mEffectCallback; }
 
@@ -513,9 +539,15 @@
         return mEffects[index];
     }
 
-    void setThread(const sp<IAfThreadBase>& thread) final;
+    void sendMetadata_l(const std::vector<playback_track_metadata_v7_t>& allMetadata,
+        const std::optional<const std::vector<playback_track_metadata_v7_t>> spatializedMetadata)
+            final REQUIRES(audio_utils::ThreadBase_Mutex);
 
-private:
+    void setThread(const sp<IAfThreadBase>& thread) final EXCLUDES_EffectChain_Mutex;
+
+  private:
+    bool setVolume_l(uint32_t* left, uint32_t* right, bool force = false)
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
     // For transaction consistency, please consider holding the EffectChain lock before
     // calling the EffectChain::EffectCallback methods, excepting
@@ -562,9 +594,10 @@
         void setVolumeForOutput(float left, float right) const override;
 
         // check if effects should be suspended/restored when a given effect is enable/disabled
-        void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect,
-                              bool enabled, bool threadLocked) override;
-        void resetVolume() override;
+        void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect, bool enabled,
+                                         bool threadLocked) override;
+        void resetVolume_l() override
+                REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
         product_strategy_t strategy() const override;
         int32_t activeTrackCnt() const override;
         void onEffectEnable(const sp<IAfEffectBase>& effect) override;
@@ -604,27 +637,34 @@
 
     // get a list of effect modules to suspend when an effect of the type
     // passed is enabled.
-    void  getSuspendEligibleEffects(Vector<sp<IAfEffectModule>> &effects);
+    void getSuspendEligibleEffects_l(Vector<sp<IAfEffectModule>>& effects)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
     // get an effect module if it is currently enable
-    sp<IAfEffectModule> getEffectIfEnabled(const effect_uuid_t *type);
+    sp<IAfEffectModule> getEffectIfEnabled_l(const effect_uuid_t* type)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
     // true if the effect whose descriptor is passed can be suspended
     // OEMs can modify the rules implemented in this method to exclude specific effect
     // types or implementations from the suspend/restore mechanism.
-    bool isEffectEligibleForSuspend(const effect_descriptor_t& desc);
+    bool isEffectEligibleForSuspend_l(const effect_descriptor_t& desc)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
-    static bool isEffectEligibleForBtNrecSuspend(const effect_uuid_t *type);
+    static bool isEffectEligibleForBtNrecSuspend_l(const effect_uuid_t* type)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
-    void clearInputBuffer_l();
+    void clearInputBuffer_l() REQUIRES(audio_utils::EffectChain_Mutex);
 
     // true if any effect module within the chain has volume control
-    bool hasVolumeControlEnabled_l() const;
+    bool hasVolumeControlEnabled_l() const REQUIRES(audio_utils::EffectChain_Mutex);
 
-    void setVolumeForOutput_l(uint32_t left, uint32_t right);
+    void setVolumeForOutput_l(uint32_t left, uint32_t right)
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
-    ssize_t getInsertIndex(const effect_descriptor_t& desc);
+    ssize_t getInsertIndex_ll(const effect_descriptor_t& desc)
+            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
 
-    std::optional<size_t> findVolumeControl_l(size_t from, size_t to) const;
+    std::optional<size_t> findVolumeControl_l(size_t from, size_t to) const
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
     // mutex protecting effect list
     mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kEffectChain_Mutex};
@@ -668,11 +708,11 @@
     status_t setEnabled(bool enabled, bool fromHandle) final;
     sp<IAfDeviceEffectProxy> asDeviceEffectProxy() final { return this; }
 
-    status_t init(const std::map<audio_patch_handle_t,
-            IAfPatchPanel::Patch>& patches) final;
+    status_t init_l(const std::map<audio_patch_handle_t, IAfPatchPanel::Patch>& patches) final
+            REQUIRES(audio_utils::DeviceEffectManager_Mutex) EXCLUDES_EffectBase_Mutex;
 
     status_t onCreatePatch(audio_patch_handle_t patchHandle,
-            const IAfPatchPanel::Patch& patch) final;
+                           const IAfPatchPanel::Patch& patch) final;
 
     status_t onUpdatePatch(audio_patch_handle_t oldPatchHandle, audio_patch_handle_t newPatchHandle,
            const IAfPatchPanel::Patch& patch) final;
@@ -690,10 +730,8 @@
     audio_channel_mask_t channelMask() const final;
     uint32_t channelCount() const final;
 
-    status_t command(int32_t cmdCode,
-                     const std::vector<uint8_t>& cmdData,
-                     int32_t maxReplySize,
-                     std::vector<uint8_t>* reply) final;
+    status_t command(int32_t cmdCode, const std::vector<uint8_t>& cmdData, int32_t maxReplySize,
+                     std::vector<uint8_t>* reply) final EXCLUDES_DeviceEffectProxy_ProxyMutex;
 
     void dump2(int fd, int spaces) const final;
 
@@ -739,7 +777,7 @@
 
         void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect __unused,
                               bool enabled __unused, bool threadLocked __unused) override {}
-        void resetVolume() override {}
+        void resetVolume_l() override REQUIRES(audio_utils::EffectChain_Mutex) {}
         product_strategy_t strategy() const override  { return static_cast<product_strategy_t>(0); }
         int32_t activeTrackCnt() const override { return 0; }
         void onEffectEnable(const sp<IAfEffectBase>& effect __unused) override;
@@ -759,13 +797,16 @@
     };
 
     status_t checkPort(const IAfPatchPanel::Patch& patch,
-            const struct audio_port_config *port, sp<IAfEffectHandle> *handle);
+            const struct audio_port_config* port, sp<IAfEffectHandle>* handle);
 
     const AudioDeviceTypeAddr mDevice;
     const sp<DeviceEffectManagerCallback> mManagerCallback;
     const sp<ProxyCallback> mMyCallback;
 
-    audio_utils::mutex& proxyMutex() const { return mProxyMutex; }
+    audio_utils::mutex& proxyMutex() const
+            RETURN_CAPABILITY(android::audio_utils::DeviceEffectProxy_ProxyMutex) {
+        return mProxyMutex;
+    }
     mutable audio_utils::mutex mProxyMutex{
             audio_utils::MutexOrder::kDeviceEffectProxy_ProxyMutex};
     std::map<audio_patch_handle_t, sp<IAfEffectHandle>> mEffectHandles; // protected by mProxyMutex
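The practical effect of the REQUIRES/EXCLUDES annotations added throughout this header is that -Wthread-safety turns locking-contract violations into build-time warnings. A hedged illustration; the caller is hypothetical and the diagnostic text is approximate:

    void badCaller(EffectChain& chain) {
        chain.resetVolume_l();
        // warning: calling function 'resetVolume_l' requires holding mutex
        // 'audio_utils::EffectChain_Mutex' exclusively [-Wthread-safety-analysis]
    }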
diff --git a/services/audioflinger/IAfEffect.h b/services/audioflinger/IAfEffect.h
index 8c5bc4b..82436a3 100644
--- a/services/audioflinger/IAfEffect.h
+++ b/services/audioflinger/IAfEffect.h
@@ -80,7 +80,7 @@
     // Methods usually implemented with help from EffectChain: pay attention to mutex locking order
     virtual product_strategy_t strategy() const = 0;
     virtual int32_t activeTrackCnt() const = 0;
-    virtual void resetVolume() = 0;
+    virtual void resetVolume_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual wp<IAfEffectChain> chain() const = 0;
     virtual bool isAudioPolicyReady() const = 0;
 };
@@ -106,43 +106,45 @@
     virtual bool isOffloadable() const = 0;
     virtual bool isImplementationSoftware() const = 0;
     virtual bool isProcessImplemented() const = 0;
-    virtual bool isVolumeControl() const = 0;
+    virtual bool isVolumeControl() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual bool isVolumeMonitor() const = 0;
     virtual bool isEnabled() const = 0;
     virtual bool isPinned() const = 0;
     virtual void unPin() = 0;
-    virtual status_t updatePolicyState() = 0;
-    virtual bool purgeHandles() = 0;
+    virtual status_t updatePolicyState() EXCLUDES_EffectBase_Mutex = 0;
+    virtual bool purgeHandles() EXCLUDES_EffectBase_Mutex = 0;
     virtual void checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) = 0;
 
     // mCallback is atomic so this can be lock-free.
     virtual void setCallback(const sp<EffectCallbackInterface>& callback) = 0;
     virtual sp<EffectCallbackInterface> getCallback() const = 0;
 
-    virtual status_t addHandle(IAfEffectHandle *handle) = 0;
-    virtual ssize_t removeHandle(IAfEffectHandle *handle) = 0;
+    virtual status_t addHandle(IAfEffectHandle* handle) EXCLUDES_EffectBase_Mutex = 0;
+    virtual ssize_t removeHandle(IAfEffectHandle* handle) EXCLUDES_EffectBase_Mutex = 0;
 
     virtual sp<IAfEffectModule> asEffectModule() = 0;
     virtual sp<IAfDeviceEffectProxy> asDeviceEffectProxy() = 0;
 
-    virtual status_t command(int32_t cmdCode,
-            const std::vector<uint8_t>& cmdData,
-            int32_t maxReplySize,
-            std::vector<uint8_t>* reply) = 0;
+    virtual status_t command(int32_t cmdCode, const std::vector<uint8_t>& cmdData,
+                             int32_t maxReplySize, std::vector<uint8_t>* reply)
+            EXCLUDES(audio_utils::EffectBase_Mutex) = 0;
 
     virtual void dump(int fd, const Vector<String16>& args) const = 0;
 
 private:
-    virtual status_t setEnabled(bool enabled, bool fromHandle) = 0;
-    virtual status_t setEnabled_l(bool enabled) = 0;
-    virtual void setSuspended(bool suspended) = 0;
-    virtual bool suspended() const = 0;
+    virtual status_t setEnabled(bool enabled, bool fromHandle) EXCLUDES_EffectBase_Mutex = 0;
+    virtual status_t setEnabled_l(bool enabled) REQUIRES(audio_utils::EffectBase_Mutex) = 0;
+    virtual void setSuspended(bool suspended) EXCLUDES_EffectBase_Mutex = 0;
+    virtual bool suspended() const EXCLUDES_EffectBase_Mutex = 0;
 
-    virtual ssize_t disconnectHandle(IAfEffectHandle *handle, bool unpinIfLast) = 0;
-    virtual ssize_t removeHandle_l(IAfEffectHandle *handle) = 0;
-    virtual IAfEffectHandle* controlHandle_l() = 0;
+    virtual ssize_t disconnectHandle(IAfEffectHandle* handle,
+                                     bool unpinIfLast) EXCLUDES_EffectBase_Mutex = 0;
+    virtual ssize_t removeHandle_l(IAfEffectHandle* handle)
+            REQUIRES(audio_utils::EffectBase_Mutex) = 0;
+    virtual IAfEffectHandle* controlHandle_l() REQUIRES(audio_utils::EffectBase_Mutex) = 0;
 
-    virtual audio_utils::mutex& mutex() const = 0;
+    virtual audio_utils::mutex& mutex() const
+            RETURN_CAPABILITY(android::audio_utils::EffectBase_Mutex) = 0;
 };
 
 class IAfEffectModule : public virtual IAfEffectBase {
@@ -162,41 +164,51 @@
     virtual status_t setDevices(const AudioDeviceTypeAddrVector &devices) = 0;
     virtual status_t setInputDevice(const AudioDeviceTypeAddr &device) = 0;
     virtual status_t setVolume(uint32_t *left, uint32_t *right, bool controller) = 0;
-    virtual status_t setOffloaded(bool offloaded, audio_io_handle_t io) = 0;
-    virtual bool isOffloaded() const = 0;
+    virtual status_t setOffloaded_l(bool offloaded, audio_io_handle_t io) = 0;
+    virtual bool isOffloaded_l() const = 0;
 
     virtual status_t setAudioSource(audio_source_t source) = 0;
     virtual status_t setMode(audio_mode_t mode) = 0;
 
-    virtual status_t start() = 0;
-    virtual status_t getConfigs(audio_config_base_t* inputCfg,
-            audio_config_base_t* outputCfg,
-            bool* isOutput) const = 0;
+    virtual status_t start_l() = 0;
+    virtual status_t getConfigs_l(audio_config_base_t* inputCfg, audio_config_base_t* outputCfg,
+                                  bool* isOutput) const
+            REQUIRES(audio_utils::EffectHandle_Mutex) EXCLUDES_EffectBase_Mutex = 0;
 
     static bool isHapticGenerator(const effect_uuid_t* type);
     virtual bool isHapticGenerator() const = 0;
-    virtual status_t setHapticIntensity(int id, os::HapticScale intensity) = 0;
-    virtual status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo) = 0;
+    static bool isSpatializer(const effect_uuid_t* type);
+    virtual bool isSpatializer() const = 0;
+
+    virtual status_t setHapticIntensity_l(int id, os::HapticScale intensity)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    virtual status_t setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    virtual status_t sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata)
+            REQUIRES(audio_utils::ThreadBase_Mutex,
+                     audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
 
 private:
     virtual void process() = 0;
-    virtual bool updateState() = 0;
-    virtual void reset_l() = 0;
-    virtual status_t configure() = 0;
-    virtual status_t init() = 0;
+    virtual bool updateState_l()
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    virtual void reset_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual status_t configure_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual status_t init_l()
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual uint32_t status() const = 0;
     virtual bool isProcessEnabled() const = 0;
-    virtual bool isOffloadedOrDirect() const = 0;
-    virtual bool isVolumeControlEnabled() const = 0;
+    virtual bool isOffloadedOrDirect_l() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual bool isVolumeControlEnabled_l() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
     virtual void setInBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
     virtual void setOutBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
     virtual int16_t *outBuffer() const = 0;
 
     // Updates the access mode if it is out of date.  May issue a new effect configure.
-    virtual void updateAccessMode() = 0;
+    virtual void updateAccessMode_l() = 0;
 
-    virtual status_t stop() = 0;
+    virtual status_t stop_l() = 0;
     virtual void addEffectToHal_l() = 0;
     virtual void release_l() = 0;
 };
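getConfigs() becomes getConfigs_l() and is annotated as requiring EffectHandle_Mutex, i.e. it is expected to be reached from an effect handle that has already taken its own mutex before querying the module. A hedged sketch of such a call path; the wrapper function is hypothetical:

    status_t queryConfigsFromHandle(IAfEffectModule& module, audio_config_base_t* inputCfg,
                                    audio_config_base_t* outputCfg, bool* isOutput)
            REQUIRES(audio_utils::EffectHandle_Mutex) {
        return module.getConfigs_l(inputCfg, outputCfg, isOutput);
    }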
@@ -216,33 +228,41 @@
     // a session is stopped or removed to allow effect tail to be rendered
     static constexpr int kProcessTailDurationMs = 1000;
 
-    virtual void process_l() = 0;
+    virtual void process_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual audio_utils::mutex& mutex() const = 0;
+    virtual audio_utils::mutex& mutex() const RETURN_CAPABILITY(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual status_t createEffect_l(sp<IAfEffectModule>& effect,
-                            effect_descriptor_t *desc,
-                            int id,
-                            audio_session_t sessionId,
-                            bool pinned) = 0;
+    virtual status_t createEffect_l(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
+                                    audio_session_t sessionId, bool pinned)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
-    virtual status_t addEffect_l(const sp<IAfEffectModule>& handle) = 0;
-    virtual status_t addEffect_ll(const sp<IAfEffectModule>& handle) = 0;
-    virtual size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) = 0;
+    virtual status_t addEffect_l(const sp<IAfEffectModule>& handle)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
+    virtual status_t addEffect_ll(const sp<IAfEffectModule>& handle)
+            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex) = 0;
+    virtual size_t removeEffect_l(const sp<IAfEffectModule>& handle,
+                                  bool release = false) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual audio_session_t sessionId() const = 0;
     virtual void setSessionId(audio_session_t sessionId) = 0;
 
-    virtual sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor) const = 0;
-    virtual sp<IAfEffectModule> getEffectFromId_l(int id) const = 0;
-    virtual sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t *type) const = 0;
-    virtual std::vector<int> getEffectIds() const = 0;
-    virtual bool setVolume_l(uint32_t *left, uint32_t *right, bool force = false) = 0;
-    virtual void resetVolume_l() = 0;
-    virtual void setDevices_l(const AudioDeviceTypeAddrVector &devices) = 0;
-    virtual void setInputDevice_l(const AudioDeviceTypeAddr &device) = 0;
-    virtual void setMode_l(audio_mode_t mode) = 0;
-    virtual void setAudioSource_l(audio_source_t source) = 0;
+    virtual sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t* descriptor) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual sp<IAfEffectModule> getEffectFromId_l(int id) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t* type) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual std::vector<int> getEffectIds_l() const = 0;
+    virtual bool setVolume(uint32_t* left, uint32_t* right,
+                           bool force = false) EXCLUDES_EffectChain_Mutex = 0;
+    virtual void resetVolume_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual void setDevices_l(const AudioDeviceTypeAddrVector& devices)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual void setInputDevice_l(const AudioDeviceTypeAddr& device)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual void setMode_l(audio_mode_t mode) REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual void setAudioSource_l(audio_source_t source)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
 
     virtual void setInBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
     virtual float *inBuffer() const = 0;
@@ -262,20 +282,21 @@
 
     // suspend or restore effects of the specified type. The number of suspend requests is counted
     // and restore occurs once all suspend requests are cancelled.
-    virtual void setEffectSuspended_l(
-            const effect_uuid_t *type, bool suspend) = 0;
+    virtual void setEffectSuspended_l(const effect_uuid_t* type, bool suspend) = 0;
     // suspend all eligible effects
     virtual void setEffectSuspendedAll_l(bool suspend) = 0;
     // check if effects should be suspended or restored when a given effect is enable or disabled
-    virtual void checkSuspendOnEffectEnabled(const sp<IAfEffectModule>& effect, bool enabled) = 0;
+    virtual void checkSuspendOnEffectEnabled_l(const sp<IAfEffectModule>& effect, bool enabled)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
 
-    virtual void clearInputBuffer() = 0;
+    virtual void clearInputBuffer() EXCLUDES_EffectChain_Mutex = 0;
 
     // At least one non offloadable effect in the chain is enabled
-    virtual bool isNonOffloadableEnabled() const = 0;
-    virtual bool isNonOffloadableEnabled_l() const = 0;
+    virtual bool isNonOffloadableEnabled() const EXCLUDES_EffectChain_Mutex = 0;
+    virtual bool isNonOffloadableEnabled_l() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual void syncHalEffectsState() = 0;
+    virtual void syncHalEffectsState_l()
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
     // flags is an ORed set of audio_output_flags_t which is updated on return.
     virtual void checkOutputFlagCompatibility(audio_output_flags_t *flags) const = 0;
@@ -293,22 +314,28 @@
     virtual bool isBitPerfectCompatible() const = 0;
 
     // isCompatibleWithThread_l() must be called with thread->mLock held
-    virtual bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const = 0;
+    virtual bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual bool containsHapticGeneratingEffect_l() = 0;
 
-    virtual void setHapticIntensity_l(int id, os::HapticScale intensity) = 0;
+    virtual void setHapticIntensity_l(int id, os::HapticScale intensity)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual sp<EffectCallbackInterface> effectCallback() const = 0;
 
     virtual wp<IAfThreadBase> thread() const = 0;
-    virtual void setThread(const sp<IAfThreadBase>& thread) = 0;
+    virtual void setThread(const sp<IAfThreadBase>& thread) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual bool isFirstEffect(int id) const = 0;
 
     virtual size_t numberOfEffects() const = 0;
     virtual sp<IAfEffectModule> getEffectModule(size_t index) const = 0;
 
+    // sendMetadata_l() must be called with thread->mutex() held
+    virtual void sendMetadata_l(const std::vector<playback_track_metadata_v7_t>& allMetadata,
+        const std::optional<const std::vector<playback_track_metadata_v7_t>> spatializedMetadata) = 0;
+
     virtual void dump(int fd, const Vector<String16>& args) const = 0;
 };
 
@@ -352,9 +379,8 @@
                 const sp<DeviceEffectManagerCallback>& callback,
                 effect_descriptor_t *desc, int id, bool notifyFramesProcessed);
 
-    virtual status_t init(
-            const std::map<audio_patch_handle_t,
-            IAfPatchPanel::Patch>& patches) = 0;
+    virtual status_t init_l(const std::map<audio_patch_handle_t, IAfPatchPanel::Patch>& patches)
+            REQUIRES(audio_utils::DeviceEffectManager_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual const AudioDeviceTypeAddr& device() const = 0;
 
     virtual status_t onCreatePatch(
diff --git a/services/audioflinger/IAfThread.h b/services/audioflinger/IAfThread.h
index 7084be9..d701288 100644
--- a/services/audioflinger/IAfThread.h
+++ b/services/audioflinger/IAfThread.h
@@ -279,7 +279,7 @@
     // integrity of the chains during the process.
     // Also sets the parameter 'effectChains' to current value of mEffectChains.
     virtual void lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains)
-            REQUIRES(mutex()) = 0;
+            REQUIRES(mutex()) EXCLUDES_EffectChain_Mutex = 0;
     // unlock effect chains after process
     virtual void unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains)
             EXCLUDES_ThreadBase_Mutex = 0;
@@ -386,6 +386,12 @@
             const effect_uuid_t* type, bool suspend, audio_session_t sessionId)
             REQUIRES(mutex()) = 0;
 
+    // Wait while the Thread is busy.  This is done to ensure that
+    // the Thread is not busy releasing the Tracks, during which the Thread mutex
+    // may be temporarily unlocked.  Some Track methods will use this method to
+    // avoid races.
+    virtual void waitWhileThreadBusy_l(audio_utils::unique_lock& ul)
+            REQUIRES(mutex()) = 0;
     // Dynamic cast to derived interface
     virtual sp<IAfDirectOutputThread> asIAfDirectOutputThread() { return nullptr; }
     virtual sp<IAfDuplicatingThread> asIAfDuplicatingThread() { return nullptr; }
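The new waitWhileThreadBusy_l() gives Track methods a way to park until the Thread has finished releasing Tracks, a window during which the Thread mutex may be dropped and re-acquired. A hedged sketch of the intended call shape; the function name is hypothetical and it assumes audio_utils::unique_lock interoperates with mutex() as elsewhere in this change:

    void doTrackWork(const sp<IAfThreadBase>& thread) {
        audio_utils::unique_lock ul(thread->mutex());
        thread->waitWhileThreadBusy_l(ul);  // returns with the mutex held and the thread not busy
        // safe to touch thread-owned track state here
    }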
diff --git a/services/audioflinger/IAfTrack.h b/services/audioflinger/IAfTrack.h
index 2302e13..ac4ed36 100644
--- a/services/audioflinger/IAfTrack.h
+++ b/services/audioflinger/IAfTrack.h
@@ -18,6 +18,7 @@
 
 #include <android/media/BnAudioRecord.h>
 #include <android/media/BnAudioTrack.h>
+#include <audio_utils/mutex.h>
 #include <audiomanager/IAudioManager.h>
 #include <binder/IMemory.h>
 #include <fastpath/FastMixerDumpState.h>
@@ -351,7 +352,8 @@
     virtual sp<os::ExternalVibration> getExternalVibration() const = 0;
 
     // This function should be called with holding thread lock.
-    virtual void updateTeePatches_l() = 0;
+    virtual void updateTeePatches_l() REQUIRES(audio_utils::ThreadBase_Mutex)
+            EXCLUDES_BELOW_ThreadBase_Mutex = 0;
 
     // Argument teePatchesToUpdate is by value, use std::move to optimize.
     virtual void setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) = 0;
diff --git a/services/audioflinger/MelReporter.cpp b/services/audioflinger/MelReporter.cpp
index 41c5096..1d38306 100644
--- a/services/audioflinger/MelReporter.cpp
+++ b/services/audioflinger/MelReporter.cpp
@@ -307,6 +307,22 @@
 
 }
 
+void MelReporter::applyAllAudioPatches() {
+    ALOGV("%s", __func__);
+
+    std::vector<IAfPatchPanel::Patch> patchesCopy;
+    {
+        audio_utils::lock_guard _laf(mAfMelReporterCallback->mutex());
+        for (const auto& patch : mAfPatchPanel->patches_l()) {
+            patchesCopy.emplace_back(patch.second);
+        }
+    }
+
+    for (const auto& patch : patchesCopy) {
+        onCreateAudioPatch(patch.mHalHandle, patch);
+    }
+}
+
 std::optional<audio_patch_handle_t> MelReporter::activePatchStreamHandle_l(
         audio_io_handle_t streamHandle) {
     for(const auto& patchIt : mActiveMelPatches) {
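applyAllAudioPatches() snapshots the patch list while holding the callback's (AudioFlinger-side) mutex and only replays onCreateAudioPatch() after dropping it, so that mutex is never held across the reporter's own locking and sound-dose callouts. Since the reporter now also needs the patch panel at construction time, the creating side presumably changes along these lines; the variable names are assumptions, not part of this patch:

    // Hypothetical construction site on the AudioFlinger side:
    mMelReporter = sp<MelReporter>::make(melReporterCallback, patchPanel);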
diff --git a/services/audioflinger/MelReporter.h b/services/audioflinger/MelReporter.h
index 235dd11..0aeb225 100644
--- a/services/audioflinger/MelReporter.h
+++ b/services/audioflinger/MelReporter.h
@@ -27,8 +27,6 @@
 
 namespace android {
 
-constexpr static int kMaxTimestampDeltaInSec = 120;
-
 class IAfMelReporterCallback : public virtual RefBase {
 public:
     virtual audio_utils::mutex& mutex() const
@@ -45,8 +43,10 @@
 class MelReporter : public PatchCommandThread::PatchCommandListener,
                     public IMelReporterCallback {
 public:
-    explicit MelReporter(const sp<IAfMelReporterCallback>& afMelReporterCallback)
-        : mAfMelReporterCallback(afMelReporterCallback) {}
+    MelReporter(const sp<IAfMelReporterCallback>& afMelReporterCallback,
+                const sp<IAfPatchPanel>& afPatchPanel)
+        : mAfMelReporterCallback(afMelReporterCallback),
+          mAfPatchPanel(afPatchPanel) {}
 
     void onFirstRef() override;
 
@@ -80,9 +80,10 @@
 
     // IMelReporterCallback methods
     void stopMelComputationForDeviceId(audio_port_handle_t deviceId) final
-            EXCLUDES_MelReporter_Mutex;
+            EXCLUDES_AudioFlinger_Mutex EXCLUDES_MelReporter_Mutex;
     void startMelComputationForDeviceId(audio_port_handle_t deviceId) final
-            EXCLUDES_MelReporter_Mutex;
+            EXCLUDES_AudioFlinger_Mutex EXCLUDES_MelReporter_Mutex;
+    void applyAllAudioPatches() final EXCLUDES_AudioFlinger_Mutex EXCLUDES_MelReporter_Mutex;
 
     // PatchCommandListener methods
     void onCreateAudioPatch(audio_patch_handle_t handle,
@@ -131,6 +132,7 @@
     bool useHalSoundDoseInterface_l() REQUIRES(mutex());
 
     const sp<IAfMelReporterCallback> mAfMelReporterCallback;
+    const sp<IAfPatchPanel> mAfPatchPanel;
 
     /* const */ sp<SoundDoseManager> mSoundDoseManager;  // set onFirstRef
 
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index b4cb805..826ba65 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -194,7 +194,8 @@
     sp<os::ExternalVibration> getExternalVibration() const final { return mExternalVibration; }
 
             // This function should be called with holding thread lock.
-    void updateTeePatches_l() final;
+    void updateTeePatches_l() final REQUIRES(audio_utils::ThreadBase_Mutex)
+            EXCLUDES_BELOW_ThreadBase_Mutex;
     void setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) final;
 
     void tallyUnderrunFrames(size_t frames) final {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d61621a..9018dcc 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -47,6 +47,7 @@
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/PersistableBundle.h>
+#include <com_android_media_audio.h>
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
 #include <fastpath/AutoPark.h>
@@ -222,6 +223,8 @@
 static const int kPriorityAudioApp = 2;
 static const int kPriorityFastMixer = 3;
 static const int kPriorityFastCapture = 3;
+// Request real-time priority for PlaybackThread in ARC
+static const int kPriorityPlaybackThreadArc = 1;
 
 // IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
 // track buffer in shared memory.  Zero on input means to use a default value.  For fast tracks,
@@ -721,8 +724,9 @@
     {
         audio_utils::unique_lock _l(event->mutex());
         while (event->mWaitStatus) {
-            if (event->mCondition.wait_for(_l, std::chrono::nanoseconds(kConfigEventTimeoutNs))
-                        == std::cv_status::timeout) {
+            if (event->mCondition.wait_for(
+                    _l, std::chrono::nanoseconds(kConfigEventTimeoutNs), getTid())
+                            == std::cv_status::timeout) {
                 event->mStatus = TIMED_OUT;
                 event->mWaitStatus = false;
             }
@@ -1483,7 +1487,7 @@
         return BAD_VALUE;
     }
 
-    if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+    if (IAfEffectModule::isSpatializer(&desc->type)
             && mType != SPATIALIZER) {
         ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
                 __func__, mType);
@@ -1571,7 +1575,7 @@
             return BAD_VALUE;
         } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
             // only post processing , downmixer or spatializer effects on output stage session
-            if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+            if (IAfEffectModule::isSpatializer(&desc->type)
                     || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
                 break;
             }
@@ -1690,7 +1694,7 @@
                     std::move(mAfThreadCallback->getDefaultVibratorInfo_l());
             if (defaultVibratorInfo) {
                 // Only set the vibrator info when it is a valid one.
-                effect->setVibratorInfo(*defaultVibratorInfo);
+                effect->setVibratorInfo_l(*defaultVibratorInfo);
             }
         }
         // create effect handle and connect it to effect module
@@ -1792,7 +1796,7 @@
 std::vector<int> ThreadBase::getEffectIds_l(audio_session_t sessionId) const
 {
     sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
-    return chain != nullptr ? chain->getEffectIds() : std::vector<int>{};
+    return chain != nullptr ? chain->getEffectIds_l() : std::vector<int>{};
 }
 
 // PlaybackThread::addEffect_ll() must be called with AudioFlinger::mutex() and
@@ -1824,7 +1828,7 @@
         return BAD_VALUE;
     }
 
-    effect->setOffloaded(mType == OFFLOAD, mId);
+    effect->setOffloaded_l(mType == OFFLOAD, mId);
 
     status_t status = chain->addEffect_l(effect);
     if (status != NO_ERROR) {
@@ -1861,22 +1865,20 @@
     }
 }
 
-void ThreadBase::lockEffectChains_l(
-        Vector<sp<IAfEffectChain>>& effectChains)
-NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::lock()
+void ThreadBase::lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains)
+        NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::lock()
 {
     effectChains = mEffectChains;
-    for (size_t i = 0; i < mEffectChains.size(); i++) {
-        mEffectChains[i]->mutex().lock();
+    for (const auto& effectChain : effectChains) {
+        effectChain->mutex().lock();
     }
 }
 
-void ThreadBase::unlockEffectChains(
-        const Vector<sp<IAfEffectChain>>& effectChains)
-NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::unlock()
+void ThreadBase::unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains)
+        NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::unlock()
 {
-    for (size_t i = 0; i < effectChains.size(); i++) {
-        effectChains[i]->mutex().unlock();
+    for (const auto& effectChain : effectChains) {
+        effectChain->mutex().unlock();
     }
 }
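lockEffectChains_l() and unlockEffectChains() keep NO_THREAD_SAFETY_ANALYSIS because a chain mutex acquired in the first function is released in the second, a split that clang's analysis cannot model; the range-for rewrite above does not change that. A minimal standalone illustration of the pattern that needs the escape hatch; the mutex and helpers are hypothetical:

    #include <mutex>
    std::mutex gChainsLock;  // stands in for the per-chain mutexes
    // Helpers that lock in one function and unlock in another opt out of the analysis.
    void lockAll() NO_THREAD_SAFETY_ANALYSIS { gChainsLock.lock(); }     // lock outlives this scope
    void unlockAll() NO_THREAD_SAFETY_ANALYSIS { gChainsLock.unlock(); } // released here instead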
 
@@ -2849,6 +2851,8 @@
         // effectively get the latency it requested.
         if (track->isExternalTrack()) {
             IAfTrackBase::track_state state = track->state();
+            // Because the track is not yet in mActiveTracks at this point,
+            // only the TrackHandle will be adding the track.
             mutex().unlock();
             status = AudioSystem::startOutput(track->portId());
             mutex().lock();
@@ -2929,7 +2933,12 @@
 
         track->setResetDone(false);
         track->resetPresentationComplete();
+
+        // Do not release the ThreadBase mutex after the track is added to mActiveTracks unless
+        // all key changes are complete.  It is possible that the threadLoop will begin
+        // processing the added track immediately after the ThreadBase mutex is released.
         mActiveTracks.add(track);
+
         if (chain != 0) {
             ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
                     track->sessionId());
@@ -3311,10 +3320,48 @@
         return {}; // nothing to do
     }
     StreamOutHalInterface::SourceMetadata metadata;
-    auto backInserter = std::back_inserter(metadata.tracks);
-    for (const sp<IAfTrack>& track : mActiveTracks) {
-        // No track is invalid as this is called after prepareTrack_l in the same critical section
-        track->copyMetadataTo(backInserter);
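+    // Stereo spatialization metadata handling below is gated on both the system property
+    // and the aconfig feature flag being enabled.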
+    static const bool stereo_spatialization_property =
+            property_get_bool("ro.audio.stereo_spatialization_enabled", false);
+    const bool stereo_spatialization_enabled =
+            stereo_spatialization_property && com_android_media_audio_stereo_spatialization();
+    if (stereo_spatialization_enabled) {
+        std::map<audio_session_t, std::vector<playback_track_metadata_v7_t>> allSessionsMetadata;
+        for (const sp<IAfTrack>& track : mActiveTracks) {
+            std::vector<playback_track_metadata_v7_t>& sessionMetadata =
+                    allSessionsMetadata[track->sessionId()];
+            auto backInserter = std::back_inserter(sessionMetadata);
+            // No track is invalid as this is called after prepareTrack_l in the same
+            // critical section
+            track->copyMetadataTo(backInserter);
+        }
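+        // Metadata of spatialized sessions is collected separately so it can be forwarded
+        // to the output stage effect chain below.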
+        std::vector<playback_track_metadata_v7_t> spatializedTracksMetaData;
+        for (const auto& [session, sessionTrackMetadata] : allSessionsMetadata) {
+            metadata.tracks.insert(metadata.tracks.end(),
+                    sessionTrackMetadata.begin(), sessionTrackMetadata.end());
+            if (auto chain = getEffectChain_l(session) ; chain != nullptr) {
+                chain->sendMetadata_l(sessionTrackMetadata, {});
+            }
+            if ((hasAudioSession_l(session) & IAfThreadBase::SPATIALIZED_SESSION) != 0) {
+                spatializedTracksMetaData.insert(spatializedTracksMetaData.end(),
+                        sessionTrackMetadata.begin(), sessionTrackMetadata.end());
+            }
+        }
+        if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); chain != nullptr) {
+            chain->sendMetadata_l(metadata.tracks, {});
+        }
+        if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE); chain != nullptr) {
+            chain->sendMetadata_l(metadata.tracks, spatializedTracksMetaData);
+        }
+        if (auto chain = getEffectChain_l(AUDIO_SESSION_DEVICE); chain != nullptr) {
+            chain->sendMetadata_l(metadata.tracks, {});
+        }
+    } else {
+        auto backInserter = std::back_inserter(metadata.tracks);
+        for (const sp<IAfTrack>& track : mActiveTracks) {
+            // No track is invalid as this is called after prepareTrack_l in the same
+            // critical section
+            track->copyMetadataTo(backInserter);
+        }
     }
     sendMetadataToBackend_l(metadata);
     MetadataUpdate change;
@@ -3909,6 +3956,27 @@
                 stream()->setHalThreadPriority(priorityBoost);
             }
         }
+    } else if (property_get_bool("ro.boot.container", false /* default_value */)) {
+        // In ARC experiments (b/73091832), the latency when using the CFS scheduler at any priority
+        // is not low enough for PlaybackThread to process audio data in time. We request the lowest
+        // real-time priority, SCHED_FIFO=1, for PlaybackThread in ARC. ro.boot.container is true
+        // only on ARC.
+        const pid_t tid = getTid();
+        if (tid == -1) {
+            ALOGW("%s: Cannot update PlaybackThread priority for ARC, no tid", __func__);
+        } else {
+            const status_t status = requestPriority(getpid(),
+                                                    tid,
+                                                    kPriorityPlaybackThreadArc,
+                                                    false /* isForApp */,
+                                                    true /* asynchronous */);
+            if (status != OK) {
+                ALOGW("%s: Cannot update PlaybackThread priority for ARC, status %d", __func__,
+                        status);
+            } else {
+                stream()->setHalThreadPriority(kPriorityPlaybackThreadArc);
+            }
+        }
     }
 
     Vector<sp<IAfTrack>> tracksToRemove;
@@ -4120,6 +4188,30 @@
 
             metadataUpdate = updateMetadata_l();
 
+            // Acquire a local copy of active tracks with lock (release w/o lock).
+            //
+            // Control methods on the track acquire the ThreadBase lock (e.g. start()
+            // stop(), pause(), etc.), but the threadLoop is entitled to call audio
+            // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
+            activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
+
+            setHalLatencyMode_l();
+
+            // updateTeePatches_l will acquire the ThreadBase_Mutex of other threads,
+            // so this is done before we lock our effect chains.
+            for (const auto& track : mActiveTracks) {
+                track->updateTeePatches_l();
+            }
+
+            // signal actual start of output stream when the render position reported by
+            // the kernel starts moving.
+            if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
+                    && (mKernelPositionOnStandby
+                            != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
+                mHalStarted = true;
+                mWaitHalStartCV.notify_all();
+            }
+
             // prevent any changes in effect chain list and in each effect chain
             // during mixing and effect process as the audio buffers could be deleted
             // or modified if an effect is created or deleted
@@ -4147,28 +4239,6 @@
                     }
                 }
             }
-
-            // Acquire a local copy of active tracks with lock (release w/o lock).
-            //
-            // Control methods on the track acquire the ThreadBase lock (e.g. start()
-            // stop(), pause(), etc.), but the threadLoop is entitled to call audio
-            // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
-            activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
-
-            setHalLatencyMode_l();
-
-            for (const auto &track : mActiveTracks ) {
-                track->updateTeePatches_l();
-            }
-
-            // signal actual start of output stream when the render position reported by the kernel
-            // starts moving.
-            if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
-                    && (mKernelPositionOnStandby
-                            != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
-                mHalStarted = true;
-                mWaitHalStartCV.notify_all();
-            }
         } // mutex() scope ends
 
         if (mBytesRemaining == 0) {
@@ -4704,8 +4774,12 @@
 void PlaybackThread::removeTracks_l(const Vector<sp<IAfTrack>>& tracksToRemove)
 NO_THREAD_SAFETY_ANALYSIS  // release and re-acquire mutex()
 {
+    if (tracksToRemove.empty()) return;
+
+    // Block all incoming TrackHandle requests until we are finished with the release.
+    setThreadBusy_l(true);
+
     for (const auto& track : tracksToRemove) {
-        mActiveTracks.remove(track);
         ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
         sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
         if (chain != 0) {
@@ -4713,17 +4787,16 @@
                     __func__, track->id(), chain.get(), track->sessionId());
             chain->decActiveTrackCnt();
         }
+
         // If an external client track, inform APM we're no longer active, and remove if needed.
-        // We do this under lock so that the state is consistent if the Track is destroyed.
+        // Since the track is active, we do it here instead of TrackBase::destroy().
         if (track->isExternalTrack()) {
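+            // The ThreadBase mutex is released around the AudioSystem calls and reacquired
+            // below; setThreadBusy_l(true) above keeps TrackHandle operations from racing
+            // with this release.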
+            mutex().unlock();
             AudioSystem::stopOutput(track->portId());
             if (track->isTerminated()) {
                 AudioSystem::releaseOutput(track->portId());
             }
-        }
-        if (track->isTerminated()) {
-            // remove from our tracks vector
-            removeTrack_l(track);
+            mutex().lock();
         }
         if (mHapticChannelCount > 0 &&
                 ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
@@ -4740,7 +4813,24 @@
                 chain->setHapticIntensity_l(track->id(), os::HapticScale::MUTE);
             }
         }
+
+        // Under lock, the track is removed from the active tracks list.
+        //
+        // Once the track is no longer active, the TrackHandle may directly
+        // modify it as the threadLoop() is no longer responsible for its maintenance.
+        // Do not modify the track from threadLoop after the mutex is unlocked
+        // if it is not active.
+        mActiveTracks.remove(track);
+
+        if (track->isTerminated()) {
+            // remove from our tracks vector
+            removeTrack_l(track);
+        }
     }
+
+    // Allow incoming TrackHandle requests.  We still hold the mutex,
+    // so pending TrackHandle requests will occur after we unlock it.
+    setThreadBusy_l(false);
 }
 
 status_t PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
@@ -5439,7 +5529,7 @@
     sp<IAfEffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
     if (chain != 0) {
         uint32_t v = (uint32_t)(masterVolume * (1 << 24));
-        chain->setVolume_l(&v, &v);
+        chain->setVolume(&v, &v);
         masterVolume = (float)((v + (1 << 23)) >> 24);
         chain.clear();
     }
@@ -5774,7 +5864,7 @@
 
             mixedTracks++;
 
-            // track->mainBuffer() != mSinkBuffer or mMixerBuffer means
+            // track->mainBuffer() != mSinkBuffer and track->mainBuffer() != mMixerBuffer means
             // there is an effect chain connected to the track
             chain.clear();
             if (track->mainBuffer() != mSinkBuffer &&
@@ -5878,7 +5968,7 @@
             track->setFinalVolume(vrf, vlf);
 
             // Delegate volume control to effect in track effect chain if needed
-            if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
+            if (chain != 0 && chain->setVolume(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
                 param = AudioMixer::VOLUME;
                 // Update remaining floating point volume levels
@@ -6618,8 +6708,8 @@
                 // Convert volumes from float to 8.24
                 uint32_t vl = (uint32_t)(left * (1 << 24));
                 uint32_t vr = (uint32_t)(right * (1 << 24));
-                // Direct/Offload effect chains set output volume in setVolume_l().
-                (void)mEffectChains[0]->setVolume_l(&vl, &vr);
+                // Direct/Offload effect chains set output volume in setVolume().
+                (void)mEffectChains[0]->setVolume(&vl, &vr);
             } else {
                 // otherwise we directly set the volume.
                 setVolumeForOutput_l(left, right);
@@ -7813,16 +7903,12 @@
         //   (mRequestedLatencyMode = AUDIO_LATENCY_MODE_LOW)
         //      AND
         // - At least one active track is spatialized
-        bool hasSpatializedActiveTrack = false;
         for (const auto& track : mActiveTracks) {
             if (track->isSpatialized()) {
-                hasSpatializedActiveTrack = true;
+                latencyMode = mRequestedLatencyMode;
                 break;
             }
         }
-        if (hasSpatializedActiveTrack && mRequestedLatencyMode == AUDIO_LATENCY_MODE_LOW) {
-            latencyMode = AUDIO_LATENCY_MODE_LOW;
-        }
     }
 
     if (latencyMode != mSetLatencyMode) {
@@ -7836,7 +7922,7 @@
 }
 
 status_t SpatializerThread::setRequestedLatencyMode(audio_latency_mode_t mode) {
-    if (mode != AUDIO_LATENCY_MODE_LOW && mode != AUDIO_LATENCY_MODE_FREE) {
+    if (mode < 0 || mode >= AUDIO_LATENCY_MODE_CNT) {
         return BAD_VALUE;
     }
     audio_utils::lock_guard _l(mutex());
@@ -9686,7 +9772,7 @@
 
     // make sure enabled pre processing effects state is communicated to the HAL as we
     // just moved them to a new input stream.
-    chain->syncHalEffectsState();
+    chain->syncHalEffectsState_l();
 
     mEffectChains.add(chain);
 
@@ -10671,7 +10757,7 @@
     chain->setThread(this);
     chain->setInBuffer(nullptr);
     chain->setOutBuffer(nullptr);
-    chain->syncHalEffectsState();
+    chain->syncHalEffectsState_l();
 
     mEffectChains.add(chain);
     checkSuspendOnAddEffectChain_l(chain);
@@ -10964,7 +11050,7 @@
         // only one effect chain can be present on DirectOutputThread, so if
         // there is one, the track is connected to it
         if (!mEffectChains.isEmpty()) {
-            mEffectChains[0]->setVolume_l(&vol, &vol);
+            mEffectChains[0]->setVolume(&vol, &vol);
             volume = (float)vol / (1 << 24);
         }
         // Try to use HW volume control and fall back to SW control if not implemented
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 21134a2..86e1894 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -436,9 +436,11 @@
                 // ThreadBase mutex before processing the mixer and effects. This guarantees the
                 // integrity of the chains during the process.
                 // Also sets the parameter 'effectChains' to current value of mEffectChains.
-    void lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains) final REQUIRES(mutex());
+    void lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) ACQUIRE(audio_utils::EffectChain_Mutex);
                 // unlock effect chains after process
-    void unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains) final;
+    void unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains) final
+            RELEASE(audio_utils::EffectChain_Mutex);
                 // get a copy of mEffectChains vector
     Vector<sp<IAfEffectChain>> getEffectChains_l() const final REQUIRES(mutex()) {
         return mEffectChains;
@@ -599,6 +601,35 @@
                 // check if some effects must be suspended when an effect chain is added
     void checkSuspendOnAddEffectChain_l(const sp<IAfEffectChain>& chain) REQUIRES(mutex());
 
+    /**
+     * waitWhileThreadBusy_l() serves as a mutex gate, which does not allow
+     * progress beyond the method while the PlaybackThread is busy (see setThreadBusy_l()).
+     * During the wait, the ThreadBase_Mutex is temporarily unlocked.
+     *
+     * This implementation uses a condition variable.  Alternative methods to gate
+     * the thread may use a second mutex (i.e. entry based on scoped_lock(mutex, gating_mutex)),
+     * but those have less flexibility and more lock order issues.
+     *
+     * Currently Track::destroy(), Track::start(), Track::stop(), Track::pause(),
+     * and Track::flush() block this way, and the primary caller is through TrackHandle
+     * with no other mutexes held.
+     *
+     * Special tracks like PatchTrack and OutputTrack may also hold another thread's
+     * ThreadBase_Mutex during this time.  No other mutex is held.
+     */
+
+    void waitWhileThreadBusy_l(audio_utils::unique_lock& ul) final REQUIRES(mutex()) {
+        // the wait returns immediately if the predicate is satisfied.
+        mThreadBusyCv.wait(ul, [&]{ return mThreadBusy == false;});
+    }
+
+    void setThreadBusy_l(bool busy) REQUIRES(mutex()) {
+        if (busy == mThreadBusy) return;
+        mThreadBusy = busy;
+        if (busy == true) return;  // no need to wake threads if we become busy.
+        mThreadBusyCv.notify_all();
+    }
+
                 // sends the metadata of the active tracks to the HAL
                 struct MetadataUpdate {
                     std::vector<playback_track_metadata_v7_t> playbackMetadataUpdate;
@@ -641,6 +672,13 @@
                 ThreadMetrics           mThreadMetrics;
                 const bool              mIsOut;
 
+    // mThreadBusy is checked under the ThreadBase_Mutex to ensure that
+    // TrackHandle operations do not proceed while the ThreadBase is busy
+    // with the track.  mThreadBusy is only true if the track is active.
+    //
+    bool mThreadBusy = false; // GUARDED_BY(ThreadBase_Mutex) but read in lambda.
+    audio_utils::condition_variable mThreadBusyCv;
+
                 // updated by PlaybackThread::readOutputParameters_l() or
                 // RecordThread::readInputParameters_l()
                 uint32_t                mSampleRate;
@@ -839,7 +877,7 @@
 
                 SimpleLog mLocalLog;  // locked internally
 
-private:
+    private:
     void dumpBase_l(int fd, const Vector<String16>& args) REQUIRES(mutex());
     void dumpEffectChains_l(int fd, const Vector<String16>& args) REQUIRES(mutex());
 };
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f18e69b..4e82173 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -890,12 +890,17 @@
         bool wasActive = false;
         const sp<IAfThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            audio_utils::lock_guard _l(thread->mutex());
+            audio_utils::unique_lock ul(thread->mutex());
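+            // Wait here if the PlaybackThread is busy releasing tracks (see setThreadBusy_l()).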
+            thread->waitWhileThreadBusy_l(ul);
+
             auto* const playbackThread = thread->asIAfPlaybackThread().get();
             wasActive = playbackThread->destroyTrack_l(this);
             forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
         }
         if (isExternalTrack() && !wasActive) {
+            // If the track is not active, the TrackHandle is responsible for
+            // releasing the port id, not the ThreadBase::threadLoop().
+            // At this point, there is no concurrency issue as the track is going away.
             AudioSystem::releaseOutput(mPortId);
         }
     }
@@ -1187,7 +1192,9 @@
                 return PERMISSION_DENIED;
             }
         }
-        audio_utils::lock_guard _lth(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
@@ -1312,7 +1319,9 @@
     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
     const sp<IAfThreadBase> thread = mThread.promote();
     if (thread != 0) {
-        audio_utils::lock_guard _l(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         track_state state = mState;
         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
             // If the track is not active (PAUSED and buffers full), flush buffers
@@ -1347,7 +1356,9 @@
     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
     const sp<IAfThreadBase> thread = mThread.promote();
     if (thread != 0) {
-        audio_utils::lock_guard _l(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         auto* const playbackThread = thread->asIAfPlaybackThread().get();
         switch (mState) {
         case STOPPING_1:
@@ -1384,7 +1395,9 @@
     ALOGV("%s(%d)", __func__, mId);
     const sp<IAfThreadBase> thread = mThread.promote();
     if (thread != 0) {
-        audio_utils::lock_guard _l(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         auto* const playbackThread = thread->asIAfPlaybackThread().get();
 
         // Flush the ring buffer now if the track is not active in the PlaybackThread.
diff --git a/services/audioflinger/afutils/Vibrator.cpp b/services/audioflinger/afutils/Vibrator.cpp
index 25fcc6a..ab15a09 100644
--- a/services/audioflinger/afutils/Vibrator.cpp
+++ b/services/audioflinger/afutils/Vibrator.cpp
@@ -44,6 +44,10 @@
 }
 
 os::HapticScale onExternalVibrationStart(const sp<os::ExternalVibration>& externalVibration) {
+    if (externalVibration->getAudioAttributes().flags & AUDIO_FLAG_MUTE_HAPTIC) {
+        ALOGD("%s, mute haptic according to audio attributes flag", __func__);
+        return os::HapticScale::MUTE;
+    }
     const sp<os::IExternalVibratorService> evs = getExternalVibratorService();
     if (evs != nullptr) {
         int32_t ret;
diff --git a/services/audioflinger/sounddose/SoundDoseManager.cpp b/services/audioflinger/sounddose/SoundDoseManager.cpp
index 1ff08dc..8d40b63 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.cpp
+++ b/services/audioflinger/sounddose/SoundDoseManager.cpp
@@ -21,10 +21,12 @@
 #include "SoundDoseManager.h"
 
 #include "android/media/SoundDoseRecord.h"
+#include <algorithm>
 #include <android-base/stringprintf.h>
-#include <media/AidlConversionCppNdk.h>
 #include <cinttypes>
 #include <ctime>
+#include <functional>
+#include <media/AidlConversionCppNdk.h>
 #include <utils/Log.h>
 
 namespace android {
@@ -46,6 +48,8 @@
     return now_ts.tv_sec;
 }
 
+constexpr float kDefaultRs2LowerBound = 80.f;  // dBA
+
 }  // namespace
 
 sp<audio_utils::MelProcessor> SoundDoseManager::getOrCreateProcessorForDevice(
@@ -53,7 +57,7 @@
         size_t channelCount, audio_format_t format) {
     const std::lock_guard _l(mLock);
 
-    if (mHalSoundDose.size() > 0 && mEnabledCsd) {
+    if (!mUseFrameworkMel && mHalSoundDose.size() > 0 && mEnabledCsd) {
         ALOGD("%s: using HAL MEL computation, no MelProcessor needed.", __func__);
         return nullptr;
     }
@@ -143,7 +147,7 @@
     ALOGV("%s", __func__);
     const std::lock_guard _l(mLock);
 
-    if (mHalSoundDose.size() > 0) {
+    if (!mUseFrameworkMel && mHalSoundDose.size() > 0) {
         bool success = true;
         for (auto& halSoundDose : mHalSoundDose) {
             // using the HAL sound dose interface
@@ -187,6 +191,21 @@
     }
 }
 
+float SoundDoseManager::getAttenuationForDeviceId(audio_port_handle_t id) const {
+    float attenuation = 0.f;
+
+    const std::lock_guard _l(mLock);
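+    // Map the port id to its device type, then look up any attenuation configured for that
+    // type (0 dB if none is found).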
+    const auto deviceTypeIt = mActiveDeviceTypes.find(id);
+    if (deviceTypeIt != mActiveDeviceTypes.end()) {
+        auto attenuationIt = mMelAttenuationDB.find(deviceTypeIt->second);
+        if (attenuationIt != mMelAttenuationDB.end()) {
+            attenuation = attenuationIt->second;
+        }
+    }
+
+    return attenuation;
+}
+
 audio_port_handle_t SoundDoseManager::getIdForAudioDevice(const AudioDevice& audioDevice) const {
     if (isComputeCsdForcedOnAllDevices()) {
         // If CSD is forced on all devices return random port id. Used only in testing.
@@ -212,6 +231,13 @@
         ALOGI("%s: could not find port id for device %s", __func__, adt.toString().c_str());
         return AUDIO_PORT_HANDLE_NONE;
     }
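+    // Do not report a port id for Bluetooth devices that are known not to support sound dose.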
+    const auto btDeviceIt = mBluetoothDevicesWithCsd.find(std::make_pair(address, type));
+    if (btDeviceIt != mBluetoothDevicesWithCsd.end()) {
+        if (!btDeviceIt->second) {
+            ALOGI("%s: bt device %s does not support sound dose", __func__, adt.toString().c_str());
+            return AUDIO_PORT_HANDLE_NONE;
+        }
+    }
     return deviceIt->second;
 }
 
@@ -260,7 +286,11 @@
                 in_audioDevice.address.toString().c_str());
         return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
     }
-    soundDoseManager->onMomentaryExposure(in_currentDbA, id);
+
+    float attenuation = soundDoseManager->getAttenuationForDeviceId(id);
+    ALOGV("%s: attenuating received momentary exposure with %f dB", __func__, attenuation);
+    // TODO: remove attenuation when enforcing HAL MELs to always be attenuated
+    soundDoseManager->onMomentaryExposure(in_currentDbA - attenuation, id);
 
     return ndk::ScopedAStatus::ok();
 }
@@ -289,9 +319,10 @@
                 in_audioDevice.address.toString().c_str());
         return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
     }
+
     // TODO: introduce timestamp in onNewMelValues callback
-    soundDoseManager->onNewMelValues(in_melRecord.melValues, 0,
-                                     in_melRecord.melValues.size(), id);
+    soundDoseManager->onNewMelValues(in_melRecord.melValues, 0, in_melRecord.melValues.size(),
+                                     id, /*attenuated=*/false);
 
     return ndk::ScopedAStatus::ok();
 }
@@ -549,9 +580,6 @@
 }
 
 void SoundDoseManager::setUseFrameworkMel(bool useFrameworkMel) {
-    // invalidate any HAL sound dose interface used
-    resetHalSoundDoseInterfaces();
-
     const std::lock_guard _l(mLock);
     mUseFrameworkMel = useFrameworkMel;
 }
@@ -562,8 +590,19 @@
 }
 
 void SoundDoseManager::setComputeCsdOnAllDevices(bool computeCsdOnAllDevices) {
-    const std::lock_guard _l(mLock);
-    mComputeCsdOnAllDevices = computeCsdOnAllDevices;
+    bool changed = false;
+    {
+        const std::lock_guard _l(mLock);
+        if (mHalSoundDose.size() != 0) {
+            // when using the HAL path we cannot enforce that values are delivered for all devices
+            changed = mUseFrameworkMel != computeCsdOnAllDevices;
+            mUseFrameworkMel = computeCsdOnAllDevices;
+        }
+        mComputeCsdOnAllDevices = computeCsdOnAllDevices;
+    }
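+    // Apply all audio patches again, outside the lock, when switching to the framework MEL path.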
+    if (changed && computeCsdOnAllDevices) {
+        mMelReporterCallback->applyAllAudioPatches();
+    }
 }
 
 bool SoundDoseManager::isComputeCsdForcedOnAllDevices() const {
@@ -582,7 +621,7 @@
 
 bool SoundDoseManager::useHalSoundDose() const {
     const std::lock_guard _l(mLock);
-    return mHalSoundDose.size() > 0;
+    return !mUseFrameworkMel && mHalSoundDose.size() > 0;
 }
 
 void SoundDoseManager::resetSoundDose() {
@@ -604,26 +643,68 @@
 }
 
 void SoundDoseManager::onNewMelValues(const std::vector<float>& mels, size_t offset, size_t length,
-                                      audio_port_handle_t deviceId) const {
+                                      audio_port_handle_t deviceId, bool attenuated) const {
     ALOGV("%s", __func__);
 
-
     sp<media::ISoundDoseCallback> soundDoseCallback;
     std::vector<audio_utils::CsdRecord> records;
     float currentCsd;
+
+    // TODO: delete this case when enforcing HAL MELs to always be attenuated
+    float attenuation = attenuated ? 0.0f : getAttenuationForDeviceId(deviceId);
+
     {
         const std::lock_guard _l(mLock);
         if (!mEnabledCsd) {
             return;
         }
 
-
         const int64_t timestampSec = getMonotonicSecond();
 
-        // only for internal callbacks
-        records = mMelAggregator->aggregateAndAddNewMelRecord(audio_utils::MelRecord(
-                deviceId, std::vector<float>(mels.begin() + offset, mels.begin() + offset + length),
-                timestampSec - length));
+        if (attenuated) {
+            records = mMelAggregator->aggregateAndAddNewMelRecord(audio_utils::MelRecord(
+                    deviceId,
+                    std::vector<float>(mels.begin() + offset, mels.begin() + offset + length),
+                    timestampSec - length));
+        } else {
+            ALOGV("%s: attenuating received values with %f dB", __func__, attenuation);
+
+            // Extract all intervals that contain values >= the RS2 lower bound (80 dBA) after
+            // the attenuation is applied
+            size_t start = offset;
+            size_t stop = offset;
+            for (; stop < mels.size() && stop < offset + length; ++stop) {
+                if (mels[stop] - attenuation < kDefaultRs2LowerBound) {
+                    if (start < stop) {
+                        std::vector<float> attMel(stop-start, -attenuation);
+                        // attMel[i - start] = mels[i] - attenuation, for i in [start, stop)
+                        std::transform(mels.begin() + start, mels.begin() + stop, attMel.begin(),
+                                       attMel.begin(), std::plus<float>());
+                        std::vector<audio_utils::CsdRecord> newRec =
+                                mMelAggregator->aggregateAndAddNewMelRecord(
+                                        audio_utils::MelRecord(deviceId,
+                                                               attMel,
+                                                               timestampSec - length + start -
+                                                               offset));
+                        std::copy(newRec.begin(), newRec.end(), std::back_inserter(records));
+                    }
+                    start = stop+1;
+                }
+            }
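+            // Handle the trailing interval that extends to the end of the processed range.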
+            if (start < stop) {
+                std::vector<float> attMel(stop-start, -attenuation);
+                // attMel[i - start] = mels[i] - attenuation, for i in [start, stop)
+                std::transform(mels.begin() + start, mels.begin() + stop, attMel.begin(),
+                               attMel.begin(), std::plus<float>());
+                std::vector<audio_utils::CsdRecord> newRec =
+                        mMelAggregator->aggregateAndAddNewMelRecord(
+                                audio_utils::MelRecord(deviceId,
+                                                       attMel,
+                                                       timestampSec - length + start -
+                                                       offset));
+                std::copy(newRec.begin(), newRec.end(), std::back_inserter(records));
+            }
+        }
 
         currentCsd = mMelAggregator->getCsd();
     }
@@ -658,6 +739,10 @@
         if (!mEnabledCsd) {
             return;
         }
+
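+        // Only report momentary exposures that reach the RS2 upper bound.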
+        if (currentMel < mRs2UpperBound) {
+            return;
+        }
     }
 
     auto soundDoseCallback = getSoundDoseCallback();
diff --git a/services/audioflinger/sounddose/SoundDoseManager.h b/services/audioflinger/sounddose/SoundDoseManager.h
index 347eabe..52a3fd6 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.h
+++ b/services/audioflinger/sounddose/SoundDoseManager.h
@@ -39,6 +39,8 @@
 
     virtual void stopMelComputationForDeviceId(audio_port_handle_t deviceId) = 0;
     virtual void startMelComputationForDeviceId(audio_port_handle_t deviceId) = 0;
+
+    virtual void applyAllAudioPatches() = 0;
 };
 
 class SoundDoseManager : public audio_utils::MelProcessor::MelCallback {
@@ -53,6 +55,13 @@
           mMelAggregator(sp<audio_utils::MelAggregator>::make(kCsdWindowSeconds)),
           mRs2UpperBound(kDefaultRs2UpperBound) {};
 
+    // Used only for testing
+    SoundDoseManager(const sp<IMelReporterCallback>& melReporterCallback,
+                     const sp<audio_utils::MelAggregator>& melAggregator)
+            : mMelReporterCallback(melReporterCallback),
+              mMelAggregator(melAggregator),
+              mRs2UpperBound(kDefaultRs2UpperBound) {};
+
     /**
      * \brief Creates or gets the MelProcessor assigned to the streamHandle
      *
@@ -144,7 +153,7 @@
 
     // ------ Override audio_utils::MelProcessor::MelCallback ------
     void onNewMelValues(const std::vector<float>& mels, size_t offset, size_t length,
-                        audio_port_handle_t deviceId) const override;
+                        audio_port_handle_t deviceId, bool attenuated) const override;
 
     void onMomentaryExposure(float currentMel, audio_port_handle_t deviceId) const override;
 
@@ -205,6 +214,8 @@
 
     sp<media::ISoundDoseCallback> getSoundDoseCallback() const;
 
+    float getAttenuationForDeviceId(audio_port_handle_t id) const;
+
     void updateAttenuation(float attenuationDB, audio_devices_t deviceType);
     void setCsdEnabled(bool enabled);
     void setUseFrameworkMel(bool useFrameworkMel);
diff --git a/services/audioflinger/sounddose/tests/Android.bp b/services/audioflinger/sounddose/tests/Android.bp
index 2a2addf..60e170d 100644
--- a/services/audioflinger/sounddose/tests/Android.bp
+++ b/services/audioflinger/sounddose/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_base_license"
@@ -11,7 +12,7 @@
     name: "sounddosemanager_tests",
 
     srcs: [
-        "sounddosemanager_tests.cpp"
+        "sounddosemanager_tests.cpp",
     ],
 
     defaults: [
diff --git a/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp b/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp
index 294080b..e79b05e 100644
--- a/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp
+++ b/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp
@@ -20,6 +20,7 @@
 #include <SoundDoseManager.h>
 
 #include <aidl/android/hardware/audio/core/sounddose/BnSoundDose.h>
+#include <audio_utils/MelAggregator.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <media/AidlConversionCppNdk.h>
@@ -43,6 +44,15 @@
 public:
     MOCK_METHOD(void, startMelComputationForDeviceId, (audio_port_handle_t), (override));
     MOCK_METHOD(void, stopMelComputationForDeviceId, (audio_port_handle_t), (override));
+    MOCK_METHOD(void, applyAllAudioPatches, (), (override));
+};
+
+class MelAggregatorMock : public audio_utils::MelAggregator {
+public:
+    MelAggregatorMock() : MelAggregator(100) {}
+
+    MOCK_METHOD(std::vector<audio_utils::CsdRecord>, aggregateAndAddNewMelRecord,
+                (const audio_utils::MelRecord&), (override));
 };
 
 constexpr char kPrimaryModule[] = "primary";
@@ -52,7 +62,8 @@
 protected:
     void SetUp() override {
         mMelReporterCallback = sp<MelReporterCallback>::make();
-        mSoundDoseManager = sp<SoundDoseManager>::make(mMelReporterCallback);
+        mMelAggregator = sp<MelAggregatorMock>::make();
+        mSoundDoseManager = sp<SoundDoseManager>::make(mMelReporterCallback, mMelAggregator);
         mHalSoundDose = ndk::SharedRefBase::make<HalSoundDoseMock>();
         mSecondaryHalSoundDose = ndk::SharedRefBase::make<HalSoundDoseMock>();
 
@@ -69,6 +80,7 @@
     }
 
     sp<MelReporterCallback> mMelReporterCallback;
+    sp<MelAggregatorMock> mMelAggregator;
     sp<SoundDoseManager> mSoundDoseManager;
     std::shared_ptr<HalSoundDoseMock> mHalSoundDose;
     std::shared_ptr<HalSoundDoseMock> mSecondaryHalSoundDose;
@@ -110,12 +122,33 @@
     EXPECT_NE(processor1, processor2);
 }
 
-TEST_F(SoundDoseManagerTest, NewMelValuesCacheNewRecord) {
-    std::vector<float>mels{1, 1};
+TEST_F(SoundDoseManagerTest, NewMelValuesAttenuatedAggregateMels) {
+    std::vector<float>mels{1.f, 1.f};
 
-    mSoundDoseManager->onNewMelValues(mels, 0, mels.size(), /*deviceId=*/1);
+    EXPECT_CALL(*mMelAggregator.get(), aggregateAndAddNewMelRecord)
+            .Times(1)
+            .WillOnce([&] (const audio_utils::MelRecord& record) {
+                EXPECT_THAT(record.mels, ::testing::ElementsAreArray(mels));
+                return std::vector<audio_utils::CsdRecord>();
+            });
 
-    EXPECT_EQ(mSoundDoseManager->getCachedMelRecordsSize(), size_t{1});
+    mSoundDoseManager->onNewMelValues(mels, 0, mels.size(), /*deviceId=*/1,
+                                      /*attenuated=*/true);
+}
+
+TEST_F(SoundDoseManagerTest, NewMelValuesUnattenuatedAreSplit) {
+    std::vector<float>mels{79.f, 80.f, 79.f, 80.f, 79.f, 79.f, 80.f};
+
+    EXPECT_CALL(*mMelAggregator.get(), aggregateAndAddNewMelRecord)
+            .Times(3)
+            .WillRepeatedly([&] (const audio_utils::MelRecord& record) {
+                EXPECT_EQ(record.mels.size(), size_t {1});
+                EXPECT_EQ(record.mels[0], 80.f);
+                return std::vector<audio_utils::CsdRecord>();
+            });
+
+    mSoundDoseManager->onNewMelValues(mels, 0, mels.size(), /*deviceId=*/1,
+            /*attenuated=*/false);
 }
 
 TEST_F(SoundDoseManagerTest, InvalidHalInterfaceIsNotSet) {
diff --git a/services/audioparameterparser/Android.bp b/services/audioparameterparser/Android.bp
index 18205bd..b3da333 100644
--- a/services/audioparameterparser/Android.bp
+++ b/services/audioparameterparser/Android.bp
@@ -57,7 +57,6 @@
     relative_install_path: "hw",
 
     init_rc: ["android.hardware.audio.parameter_parser.example_service.rc"],
-    vintf_fragments: ["android.hardware.audio.parameter_parser.example_service.xml"],
 
     defaults: [
         "android.hardware.audio.parameter_parser.example_defaults",
diff --git a/services/audioparameterparser/android.hardware.audio.parameter_parser.example_service.xml b/services/audioparameterparser/android.hardware.audio.parameter_parser.example_service.xml
deleted file mode 100644
index 91addaa..0000000
--- a/services/audioparameterparser/android.hardware.audio.parameter_parser.example_service.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<manifest version="1.0" type="framework">
-  <hal format="aidl">
-    <name>android.media.audio</name>
-    <version>1</version>
-    <fqname>IHalAdapterVendorExtension/default</fqname>
-  </hal>
-</manifest>
diff --git a/services/audiopolicy/Android.bp b/services/audiopolicy/Android.bp
index e018dd3..66ba7e2 100644
--- a/services/audiopolicy/Android.bp
+++ b/services/audiopolicy/Android.bp
@@ -1,10 +1,11 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
     // to get the below license kinds:
     //   SPDX-license-identifier-Apache-2.0
-    default_applicable_licenses: ["frameworks_av_license"],
+    default_applicable_licenses: ["Android-Apache-2.0"],
 }
 
 cc_library_headers {
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index b164159..bfc3132 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -269,6 +269,7 @@
 
     virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes) = 0;
     virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes) = 0;
+    virtual status_t getRegisteredPolicyMixes(std::vector<AudioMix>& mixes) = 0;
 
     virtual status_t updatePolicyMix(
         const AudioMix& mix,
@@ -285,7 +286,8 @@
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
                                       audio_port_handle_t *portId,
-                                      uid_t uid) = 0;
+                                      uid_t uid,
+                                      bool internal = false) = 0;
     virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
diff --git a/services/audiopolicy/common/Android.bp b/services/audiopolicy/common/Android.bp
index 91701ad..a699b8b 100644
--- a/services/audiopolicy/common/Android.bp
+++ b/services/audiopolicy/common/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index 8b76842..598d52d 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 13b70e5..7c70877 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -364,7 +364,7 @@
 
     void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     virtual DeviceVector devices() const;
-    void setDevices(const DeviceVector &devices) { mDevices = devices; }
+    void setDevices(const DeviceVector &devices);
     bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
     virtual DeviceVector supportedDevices() const;
     virtual bool devicesSupportEncodedFormats(const DeviceTypeSet& deviceTypes);
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 7119b85..fe90a1e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -222,7 +222,8 @@
                            const struct audio_port_config &config,
                            const sp<DeviceDescriptor>& srcDevice,
                            audio_stream_type_t stream, product_strategy_t strategy,
-                           VolumeSource volumeSource);
+                           VolumeSource volumeSource,
+                           bool isInternal);
 
     ~SourceClientDescriptor() override = default;
 
@@ -248,6 +249,7 @@
     void setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput, bool closeOutput = false);
     wp<HwAudioOutputDescriptor> hwOutput() const { return mHwOutput; }
     void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
+    bool isInternal() const override { return mIsInternal; }
 
     using ClientDescriptor::dump;
     void dump(String8 *dst, int spaces) const override;
@@ -268,34 +270,17 @@
      * behavior of AudioDeviceCallback.
      */
     bool mCloseOutput = false;
-};
-
-/**
- * @brief The InternalSourceClientDescriptor class
- * Specialized Client Descriptor for either a raw patch created from @see createAudioPatch API
- * or for internal audio patches managed by APM (e.g. phone call patches).
- * Whatever the bridge created (software or hardware), we need a client to track the activity
- * and manage volumes.
- * The Audio Patch requested sink is expressed as a preferred device which allows to route
- * the SwOutput. Then APM will performs checks on the UID (against UID of Audioserver) of the
- * requester to prevent rerouting SwOutput involved in raw patches.
- */
-class InternalSourceClientDescriptor: public SourceClientDescriptor
-{
-public:
-    InternalSourceClientDescriptor(
-            audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
-            const struct audio_port_config &config, const sp<DeviceDescriptor>& srcDevice,
-             const sp<DeviceDescriptor>& sinkDevice,
-            product_strategy_t strategy, VolumeSource volumeSource) :
-        SourceClientDescriptor(
-            portId, uid, attributes, config, srcDevice, AUDIO_STREAM_PATCH, strategy,
-            volumeSource)
-    {
-        setPreferredDeviceId(sinkDevice->getId());
-    }
-    bool isInternal() const override { return true; }
-    ~InternalSourceClientDescriptor() override = default;
+    /**
+     * True for a specialized client descriptor, used either for a raw patch created from the
+     * @see createAudioPatch API or for internal audio patches managed by APM
+     * (e.g. phone call patches).
+     * Whatever bridge is created (software or hardware), we need a client to track the activity
+     * and manage volumes.
+     * The audio patch's requested sink is expressed as a preferred device, which allows routing
+     * the SwOutput. APM then performs checks on the UID of the requester (against the UID of
+     * Audioserver) to prevent rerouting a SwOutput involved in raw patches.
+     */
+    bool mIsInternal = false;
 };
 
 class SourceClientCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 6c130fd..c502fc2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -93,6 +93,8 @@
 
     void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
 
+    void setPreferredConfig(const audio_config_base_t * preferredConfig);
+
     void dump(String8 *dst, int spaces, bool verbose = true) const;
 
 private:
@@ -107,6 +109,7 @@
     audio_format_t      mCurrentEncodedFormat;
     bool                mIsDynamic = false;
     std::string         mDeclaredAddress; // Original device address
+    std::optional<audio_config_base_t> mPreferredConfig;
 };
 
 class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index f3a9518..688772c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -70,10 +70,17 @@
         return mMixerBehaviors;
     }
 
+    enum CompatibilityScore {
+        NO_MATCH = 0,
+        PARTIAL_MATCH = 1,
+        EXACT_MATCH = 2
+    };
+
     /**
-     * @brief isCompatibleProfile: This method is used for input and direct output,
+     * @brief getCompatibilityScore: This method is used for input and direct output,
      * and is not used for other output.
-     * Checks if the IO profile is compatible with specified parameters.
+     * Returns a compatibility score that measures how compatible the IO profile is
+     * with the specified parameters.
      * For input, flags is interpreted as audio_input_flags_t.
      * TODO: merge audio_output_flags_t and audio_input_flags_t.
      *
@@ -86,18 +93,18 @@
      * @param updatedChannelMask if non-NULL, it is assigned the actual channel mask
      * @param flags to be checked for compatibility
      * @param exactMatchRequiredForInputFlags true if exact match is required on flags
-     * @return true if the profile is compatible, false otherwise.
+     * @return the compatibility score of the IO profile for the given parameters.
      */
-    bool isCompatibleProfile(const DeviceVector &devices,
-                             uint32_t samplingRate,
-                             uint32_t *updatedSamplingRate,
-                             audio_format_t format,
-                             audio_format_t *updatedFormat,
-                             audio_channel_mask_t channelMask,
-                             audio_channel_mask_t *updatedChannelMask,
-                             // FIXME parameter type
-                             uint32_t flags,
-                             bool exactMatchRequiredForInputFlags = false) const;
+    CompatibilityScore getCompatibilityScore(const DeviceVector &devices,
+                                             uint32_t samplingRate,
+                                             uint32_t *updatedSamplingRate,
+                                             audio_format_t format,
+                                             audio_format_t *updatedFormat,
+                                             audio_channel_mask_t channelMask,
+                                             audio_channel_mask_t *updatedChannelMask,
+                                             // FIXME parameter type
+                                             uint32_t flags,
+                                             bool exactMatchRequiredForInputFlags = false) const;
 
     /**
      * @brief areAllDevicesSupported: Checks if the given devices are supported by the IO profile.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index d027564..6537a00 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -778,6 +778,19 @@
     }
 }
 
+void SwAudioOutputDescriptor::setDevices(const android::DeviceVector &devices) {
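+    // For bit-perfect outputs, move the preferred config from the previously routed devices
+    // to the newly routed ones.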
+    if ((mFlags & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_BIT_PERFECT) {
+        for (auto device : mDevices) {
+            device->setPreferredConfig(nullptr);
+        }
+        auto config = getConfig();
+        for (auto device : devices) {
+            device->setPreferredConfig(&config);
+        }
+    }
+    mDevices = devices;
+}
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 8b6866e..2aee501 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -96,12 +96,12 @@
 SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
          audio_attributes_t attributes, const struct audio_port_config &config,
          const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream,
-         product_strategy_t strategy, VolumeSource volumeSource) :
+         product_strategy_t strategy, VolumeSource volumeSource, bool isInternal) :
     TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
         {config.sample_rate, config.channel_mask, config.format}, AUDIO_PORT_HANDLE_NONE,
         stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
         {} /* Sources do not support secondary outputs*/, nullptr),
-    mSrcDevice(srcDevice)
+    mSrcDevice(srcDevice), mIsInternal(isInternal)
 {
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index fe25693..9f7b8fc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -132,6 +132,20 @@
 {
     DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
     dstConfig->ext.device.hw_module = getModuleHandle();
+    if (mPreferredConfig.has_value()) {
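+        // Only fields explicitly set in the preferred config override the port config.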
+        if (mPreferredConfig->format != AUDIO_FORMAT_DEFAULT) {
+            dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
+            dstConfig->format = mPreferredConfig->format;
+        }
+        if (mPreferredConfig->sample_rate != 0) {
+            dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
+            dstConfig->sample_rate = mPreferredConfig->sample_rate;
+        }
+        if (mPreferredConfig->channel_mask != AUDIO_CHANNEL_NONE) {
+            dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
+            dstConfig->channel_mask = mPreferredConfig->channel_mask;
+        }
+    }
 }
 
 void DeviceDescriptor::toAudioPort(struct audio_port *port) const
@@ -183,6 +197,14 @@
     }
 }
 
+void DeviceDescriptor::setPreferredConfig(const audio_config_base_t* preferredConfig) {
+    if (preferredConfig == nullptr) {
+        mPreferredConfig.reset();
+    } else {
+        mPreferredConfig = *preferredConfig;
+    }
+}
+
 void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
 {
     String8 extraInfo;
@@ -193,6 +215,13 @@
     std::string descBaseDumpStr;
     DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.c_str(), verbose);
     dst->append(descBaseDumpStr.c_str());
+
+    if (mPreferredConfig.has_value()) {
+        dst->append(base::StringPrintf(
+                "%*sPreferred Config: format=%#x, channelMask=%#x, sampleRate=%u\n",
+                spaces, "", mPreferredConfig.value().format, mPreferredConfig.value().channel_mask,
+                mPreferredConfig.value().sample_rate).c_str());
+    }
 }
 
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index c7d2e6b..d9fbd89 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -33,17 +33,17 @@
     }
 }
 
-bool IOProfile::isCompatibleProfile(const DeviceVector &devices,
-                                    uint32_t samplingRate,
-                                    uint32_t *updatedSamplingRate,
-                                    audio_format_t format,
-                                    audio_format_t *updatedFormat,
-                                    audio_channel_mask_t channelMask,
-                                    audio_channel_mask_t *updatedChannelMask,
-                                    // FIXME type punning here
-                                    uint32_t flags,
-                                    bool exactMatchRequiredForInputFlags) const
-{
+IOProfile::CompatibilityScore IOProfile::getCompatibilityScore(
+        const android::DeviceVector &devices,
+        uint32_t samplingRate,
+        uint32_t *updatedSamplingRate,
+        audio_format_t format,
+        audio_format_t *updatedFormat,
+        audio_channel_mask_t channelMask,
+        audio_channel_mask_t *updatedChannelMask,
+        // FIXME type punning here
+        uint32_t flags,
+        bool exactMatchRequiredForInputFlags) const {
     const bool isPlaybackThread =
             getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
     const bool isRecordThread =
@@ -51,13 +51,13 @@
     ALOG_ASSERT(isPlaybackThread != isRecordThread);
     if (!areAllDevicesSupported(devices) ||
             !isCompatibleProfileForFlags(flags, exactMatchRequiredForInputFlags)) {
-        return false;
+        return NO_MATCH;
     }
 
     if (!audio_is_valid_format(format) ||
             (isPlaybackThread && (samplingRate == 0 || !audio_is_output_channel(channelMask))) ||
             (isRecordThread && (!audio_is_input_channel(channelMask)))) {
-         return false;
+         return NO_MATCH;
     }
 
     audio_format_t myUpdatedFormat = format;
@@ -69,32 +69,40 @@
         .channel_mask = channelMask,
         .format = format,
     };
+    auto result = NO_MATCH;
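+    // Record threads may fall back to a compatible (non-exact) profile; MMAP inputs and
+    // playback threads require an exact match.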
     if (isRecordThread)
     {
         if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             if (checkExactAudioProfile(&config) != NO_ERROR) {
-                return false;
+                return result;
             }
-        } else if (checkExactAudioProfile(&config) != NO_ERROR && checkCompatibleAudioProfile(
-                myUpdatedSamplingRate, myUpdatedChannelMask, myUpdatedFormat) != NO_ERROR) {
-            return false;
+            result = EXACT_MATCH;
+        } else if (checkExactAudioProfile(&config) == NO_ERROR) {
+            result = EXACT_MATCH;
+        } else if (checkCompatibleAudioProfile(
+                myUpdatedSamplingRate, myUpdatedChannelMask, myUpdatedFormat) == NO_ERROR) {
+            result = PARTIAL_MATCH;
+        } else {
+            return result;
         }
     } else {
-        if (checkExactAudioProfile(&config) != NO_ERROR) {
-            return false;
+        if (checkExactAudioProfile(&config) == NO_ERROR) {
+            result = EXACT_MATCH;
+        } else {
+            return result;
         }
     }
 
-    if (updatedSamplingRate != NULL) {
+    if (updatedSamplingRate != nullptr) {
         *updatedSamplingRate = myUpdatedSamplingRate;
     }
-    if (updatedFormat != NULL) {
+    if (updatedFormat != nullptr) {
         *updatedFormat = myUpdatedFormat;
     }
-    if (updatedChannelMask != NULL) {
+    if (updatedChannelMask != nullptr) {
         *updatedChannelMask = myUpdatedChannelMask;
     }
-    return true;
+    return result;
 }
 
 bool IOProfile::areAllDevicesSupported(const DeviceVector &devices) const {
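The boolean isCompatibleProfile() is replaced by getCompatibilityScore(), so callers can distinguish an exact rate/format/channel match (EXACT_MATCH) from a compatible-but-adjusted one (PARTIAL_MATCH) instead of a plain yes/no. A minimal caller sketch, assuming an sp<IOProfile> profile and a DeviceVector devices are in scope; the concrete rate, format, and mask values are illustrative only:

    // Sketch: prefer an exact match, remember the first partial match as a fallback.
    sp<IOProfile> firstPartial;
    const auto score = profile->getCompatibilityScore(
            devices,
            48000 /*samplingRate*/, nullptr /*updatedSamplingRate*/,
            AUDIO_FORMAT_PCM_16_BIT, nullptr /*updatedFormat*/,
            AUDIO_CHANNEL_OUT_STEREO, nullptr /*updatedChannelMask*/,
            AUDIO_OUTPUT_FLAG_NONE);
    if (score == IOProfile::EXACT_MATCH) {
        // use this profile directly
    } else if (score != IOProfile::NO_MATCH && firstPartial == nullptr) {
        firstPartial = profile;  // keep as fallback if no exact match is found
    }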
diff --git a/services/audiopolicy/config/Android.bp b/services/audiopolicy/config/Android.bp
index 86600f4..7d529df 100644
--- a/services/audiopolicy/config/Android.bp
+++ b/services/audiopolicy/config/Android.bp
@@ -18,6 +18,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -31,41 +32,49 @@
     vendor: true,
     src: ":a2dp_in_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "a2dp_audio_policy_configuration.xml",
     vendor: true,
     src: ":a2dp_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "audio_policy_configuration.xml",
     vendor: true,
     src: ":audio_policy_configuration_generic",
 }
+
 prebuilt_etc {
     name: "r_submix_audio_policy_configuration.xml",
     vendor: true,
     src: ":r_submix_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "audio_policy_volumes.xml",
     vendor: true,
     src: ":audio_policy_volumes",
 }
+
 prebuilt_etc {
     name: "default_volume_tables.xml",
     vendor: true,
     src: ":default_volume_tables",
 }
+
 prebuilt_etc {
     name: "surround_sound_configuration_5_0.xml",
     vendor: true,
     src: ":surround_sound_configuration_5_0",
 }
+
 prebuilt_etc {
     name: "usb_audio_policy_configuration.xml",
     vendor: true,
     src: ":usb_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "primary_audio_policy_configuration.xml",
     src: ":primary_audio_policy_configuration",
@@ -76,50 +85,62 @@
     name: "a2dp_in_audio_policy_configuration",
     srcs: ["a2dp_in_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "a2dp_audio_policy_configuration",
     srcs: ["a2dp_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "primary_audio_policy_configuration",
     srcs: ["primary_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "surround_sound_configuration_5_0",
     srcs: ["surround_sound_configuration_5_0.xml"],
 }
+
 filegroup {
     name: "default_volume_tables",
     srcs: ["default_volume_tables.xml"],
 }
+
 filegroup {
     name: "audio_policy_volumes",
     srcs: ["audio_policy_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_configuration_generic",
     srcs: ["audio_policy_configuration_generic.xml"],
 }
+
 filegroup {
     name: "audio_policy_configuration_generic_configurable",
     srcs: ["audio_policy_configuration_generic_configurable.xml"],
 }
+
 filegroup {
     name: "usb_audio_policy_configuration",
     srcs: ["usb_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "r_submix_audio_policy_configuration",
     srcs: ["r_submix_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "bluetooth_audio_policy_configuration_7_0",
     srcs: ["bluetooth_audio_policy_configuration_7_0.xml"],
 }
+
 filegroup {
     name: "bluetooth_with_le_audio_policy_configuration_7_0",
     srcs: ["bluetooth_with_le_audio_policy_configuration_7_0.xml"],
 }
+
 filegroup {
     name: "hearing_aid_audio_policy_configuration_7_0",
     srcs: ["hearing_aid_audio_policy_configuration_7_0.xml"],
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
index 0034a04..a93c816 100644
--- a/services/audiopolicy/engine/common/Android.bp
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/config/Android.bp b/services/audiopolicy/engine/config/Android.bp
index 12597de..0864e6a 100644
--- a/services/audiopolicy/engine/config/Android.bp
+++ b/services/audiopolicy/engine/config/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/config/tests/Android.bp b/services/audiopolicy/engine/config/tests/Android.bp
index 5d1aa16..8c7b7db 100644
--- a/services/audiopolicy/engine/config/tests/Android.bp
+++ b/services/audiopolicy/engine/config/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/config/tests/resources/Android.bp b/services/audiopolicy/engine/config/tests/resources/Android.bp
index 9cee978..99d62a3 100644
--- a/services/audiopolicy/engine/config/tests/resources/Android.bp
+++ b/services/audiopolicy/engine/config/tests/resources/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/interface/Android.bp b/services/audiopolicy/engine/interface/Android.bp
index 5dd5adb..b1f7666 100644
--- a/services/audiopolicy/engine/interface/Android.bp
+++ b/services/audiopolicy/engine/interface/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index eb2e2f4..d59ab5a 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -40,7 +41,7 @@
         "libaudiopolicyengineconfigurable_pfwwrapper",
 
     ],
-  shared_libs: [
+    shared_libs: [
         "libaudio_aidl_conversion_common_cpp",
         "libaudiofoundation",
         "libaudiopolicycomponents",
diff --git a/services/audiopolicy/engineconfigurable/config/Android.bp b/services/audiopolicy/engineconfigurable/config/Android.bp
index b3d1f97..8dd13e8 100644
--- a/services/audiopolicy/engineconfigurable/config/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/Android.bp
@@ -17,6 +17,7 @@
 // Root soong_namespace for common components
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -30,10 +31,12 @@
     vendor: true,
     src: ":audio_policy_engine_criteria",
 }
+
 filegroup {
     name: "audio_policy_engine_criterion_types_template",
     srcs: ["example/common/audio_policy_engine_criterion_types.xml.in"],
 }
+
 filegroup {
     name: "audio_policy_engine_criteria",
     srcs: ["example/common/audio_policy_engine_criteria.xml"],
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp b/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
index e46b60f..fb1a71c 100644
--- a/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
@@ -23,6 +23,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,16 +43,19 @@
         ":audio_policy_engine_volumes.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_volumes.xml",
     vendor: true,
     src: ":audio_policy_engine_volumes",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_criterion_types.xml",
     vendor: true,
@@ -69,6 +73,7 @@
         ":audio_policy_configuration_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_configuration_files",
     srcs: [
@@ -79,18 +84,22 @@
         ":primary_audio_policy_configuration",
     ],
 }
+
 filegroup {
-    name : "audio_policy_configuration_top_file",
+    name: "audio_policy_configuration_top_file",
     srcs: [":audio_policy_configuration_generic"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration",
     srcs: ["audio_policy_engine_configuration.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_volumes",
     srcs: ["audio_policy_engine_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp b/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
index ad6eeb1..b9abb54 100644
--- a/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
@@ -24,6 +24,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -43,11 +44,13 @@
         ":audio_policy_engine_volumes.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_criterion_types.xml",
     vendor: true,
@@ -65,6 +68,7 @@
         ":audio_policy_configuration_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_configuration_files",
     srcs: [
@@ -75,10 +79,12 @@
         ":primary_audio_policy_configuration",
     ],
 }
+
 filegroup {
-    name : "audio_policy_configuration_top_file",
+    name: "audio_policy_configuration_top_file",
     srcs: [":audio_policy_configuration_generic"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp b/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
index 773a99a..67a6128 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
@@ -23,6 +23,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,21 +43,25 @@
         ":audio_policy_engine_volumes.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_stream_volumes.xml",
     vendor: true,
     src: ":audio_policy_engine_stream_volumes",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_default_stream_volumes.xml",
     vendor: true,
     src: ":audio_policy_engine_default_stream_volumes",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_criterion_types.xml",
     vendor: true,
@@ -74,6 +79,7 @@
         ":audio_policy_configuration_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_configuration_files",
     srcs: [
@@ -84,22 +90,27 @@
         ":primary_audio_policy_configuration",
     ],
 }
+
 filegroup {
-    name : "audio_policy_configuration_top_file",
+    name: "audio_policy_configuration_top_file",
     srcs: [":audio_policy_configuration_generic"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration",
     srcs: ["audio_policy_engine_configuration.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_stream_volumes",
     srcs: ["audio_policy_engine_stream_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_default_stream_volumes",
     srcs: ["audio_policy_engine_default_stream_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
index ee62d5e..7fe111f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
@@ -17,6 +17,7 @@
 // Root soong_namespace for common components
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -31,18 +32,21 @@
     src: ":PolicyClass",
     sub_dir: "parameter-framework/Structure/Policy",
 }
+
 prebuilt_etc {
     name: "PolicySubsystem.xml",
     vendor: true,
     src: ":PolicySubsystem",
     sub_dir: "parameter-framework/Structure/Policy",
 }
+
 prebuilt_etc {
     name: "PolicySubsystem-CommonTypes.xml",
     vendor: true,
     src: ":buildcommontypesstructure_gen",
     sub_dir: "parameter-framework/Structure/Policy",
 }
+
 genrule {
     name: "buildcommontypesstructure_gen",
     defaults: ["buildcommontypesstructurerule"],
@@ -52,34 +56,42 @@
     name: "product_strategies_structure_template",
     srcs: ["examples/common/Structure/ProductStrategies.xml.in"],
 }
+
 filegroup {
     name: "PolicySubsystem",
     srcs: ["examples/common/Structure/PolicySubsystem.xml"],
 }
+
 filegroup {
     name: "PolicySubsystem-no-strategy",
     srcs: ["examples/common/Structure/PolicySubsystem-no-strategy.xml"],
 }
+
 filegroup {
     name: "common_types_structure_template",
     srcs: ["examples/common/Structure/PolicySubsystem-CommonTypes.xml.in"],
 }
+
 filegroup {
     name: "PolicyClass",
     srcs: ["examples/common/Structure/PolicyClass.xml"],
 }
+
 filegroup {
     name: "volumes.pfw",
     srcs: ["examples/Settings/volumes.pfw"],
 }
+
 filegroup {
     name: "device_for_input_source.pfw",
     srcs: ["examples/Settings/device_for_input_source.pfw"],
 }
+
 filegroup {
     name: "ParameterFrameworkConfigurationPolicy.userdebug.xml",
     srcs: ["examples/ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "ParameterFrameworkConfigurationPolicy.user.xml",
     srcs: ["examples/ParameterFrameworkConfigurationPolicy.user.xml"],
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
index 7d2d293..38451f2 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
@@ -27,6 +27,7 @@
 // Generate Audio Policy Parameter Framework Product Strategies Structure file from template
 //
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,6 +43,7 @@
     sub_dir: "parameter-framework/Structure/Policy",
     required: ["libpolicy-subsystem"],
 }
+
 genrule {
     name: "buildstrategiesstructure_gen",
     defaults: ["buildstrategiesstructurerule"],
@@ -67,6 +69,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -78,6 +81,7 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -86,11 +90,13 @@
         "Settings/device_for_product_strategies.pfw",
     ],
 }
+
 // This is for Settings generation, must use socket port, so userdebug version is required
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
index f825e5f..eae6ae2 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
@@ -28,6 +28,7 @@
 // Generate Audio Policy Parameter Framework Product Strategies Structure file from template
 //
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -43,6 +44,7 @@
     sub_dir: "parameter-framework/Structure/Policy",
     required: ["libpolicy-subsystem"],
 }
+
 genrule {
     name: "buildstrategiesstructure_gen",
     defaults: ["buildstrategiesstructurerule"],
@@ -68,6 +70,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -79,6 +82,7 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -87,11 +91,13 @@
         "Settings/device_for_product_strategies.pfw",
     ],
 }
+
 // This is for Settings generation, must use socket port, so userdebug version is required
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
index 4a83cbc..4e8654b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
@@ -27,6 +27,7 @@
 // Generate Audio Policy Parameter Framework Product Strategies Structure file from template
 //
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,6 +43,7 @@
     sub_dir: "parameter-framework/Structure/Policy",
     required: ["libpolicy-subsystem"],
 }
+
 genrule {
     name: "buildstrategiesstructure_gen",
     defaults: ["buildstrategiesstructurerule"],
@@ -67,6 +69,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -78,6 +81,7 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -95,11 +99,13 @@
         "Settings/device_for_product_strategy_patch.pfw",
     ],
 }
+
 // This is for Settings generation, must use socket port, so userdebug version is required
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
index 89ab892..e279a8f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
@@ -24,6 +24,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -57,10 +58,12 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
@@ -69,6 +72,7 @@
         ":buildcommontypesstructure_gen",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -76,6 +80,7 @@
         ":volumes.pfw",
     ],
 }
+
 prebuilt_etc {
     name: "PolicySubsystem.xml",
     vendor: true,
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
index 4880547..47b8b54 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
@@ -24,6 +24,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -45,6 +46,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -56,10 +58,12 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
@@ -68,6 +72,7 @@
         ":buildcommontypesstructure_gen",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -76,6 +81,7 @@
         ":device_for_input_source.pfw",
     ],
 }
+
 prebuilt_etc {
     name: "PolicySubsystem.xml",
     vendor: true,
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp
index f7159c5..aa2163e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -40,6 +41,6 @@
         "liblog",
         "libutils",
         "libmedia_helper",
-        "libparameter"
+        "libparameter",
     ],
 }
diff --git a/services/audiopolicy/engineconfigurable/tools/Android.bp b/services/audiopolicy/engineconfigurable/tools/Android.bp
index 3aec064..2f77372 100644
--- a/services/audiopolicy/engineconfigurable/tools/Android.bp
+++ b/services/audiopolicy/engineconfigurable/tools/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -36,13 +37,13 @@
     name: "buildpolicycriteriontypesrule",
     tools: ["buildPolicyCriterionTypes"],
     cmd: "cp $(locations :audio_policy_configuration_files) $(genDir)/. && " +
-         "cp $(location :audio_policy_configuration_top_file) $(genDir)/audio_policy_configuration.xml && " +
-         "$(location buildPolicyCriterionTypes) " +
-         " --androidaudiobaseheader $(location :libaudio_system_audio_base) " +
-         " --androidaudiocommonbaseheader $(location :libaudio_system_audio_common_base) " +
-         "--audiopolicyconfigurationfile $(genDir)/audio_policy_configuration.xml " +
-         "--criteriontypes $(location :audio_policy_engine_criterion_types_template) " +
-         "--outputfile $(out)",
+        "cp $(location :audio_policy_configuration_top_file) $(genDir)/audio_policy_configuration.xml && " +
+        "$(location buildPolicyCriterionTypes) " +
+        " --androidaudiobaseheader $(location :libaudio_system_audio_base) " +
+        " --androidaudiocommonbaseheader $(location :libaudio_system_audio_common_base) " +
+        "--audiopolicyconfigurationfile $(genDir)/audio_policy_configuration.xml " +
+        "--criteriontypes $(location :audio_policy_engine_criterion_types_template) " +
+        "--outputfile $(out)",
     srcs: [
         // The commented inputs must be provided to use this genrule_defaults
         // @todo uncomment if 1428659 is merged":android_audio_base_header_file",
@@ -81,17 +82,17 @@
         "domainGeneratorConnector",
     ],
     cmd: "mkdir -p $(genDir)/Structure/Policy && " +
-         "cp $(locations :audio_policy_pfw_structure_files) $(genDir)/Structure/Policy && " +
-         "cp $(location :audio_policy_pfw_toplevel) $(genDir)/top_level && " +
-         "$(location domainGeneratorPolicy) " +
-         "--validate " +
-         "--domain-generator-tool $(location domainGeneratorConnector) " +
-         "--toplevel-config $(genDir)/top_level " +
-         "--criteria $(location :audio_policy_engine_criteria) " +
-         "--criteriontypes $(location :audio_policy_engine_criterion_types) " +
-         "--add-edds $(locations :edd_files) " +
-         "--schemas-dir external/parameter-framework/upstream/schemas " +
-         " > $(out)",
+        "cp $(locations :audio_policy_pfw_structure_files) $(genDir)/Structure/Policy && " +
+        "cp $(location :audio_policy_pfw_toplevel) $(genDir)/top_level && " +
+        "$(location domainGeneratorPolicy) " +
+        "--validate " +
+        "--domain-generator-tool $(location domainGeneratorConnector) " +
+        "--toplevel-config $(genDir)/top_level " +
+        "--criteria $(location :audio_policy_engine_criteria) " +
+        "--criteriontypes $(location :audio_policy_engine_criterion_types) " +
+        "--add-edds $(locations :edd_files) " +
+        "--schemas-dir external/parameter-framework/upstream/schemas " +
+        " > $(out)",
     srcs: [
         // The commented inputs must be provided to use this genrule_defaults
         // ":audio_policy_pfw_toplevel",
@@ -118,11 +119,11 @@
 genrule_defaults {
     name: "buildstrategiesstructurerule",
     tools: ["buildStrategiesStructureFile"],
-    cmd: "cp $(locations :audio_policy_engine_configuration_files) $(genDir) && ls -l $(genDir) &&"+
-         "$(location buildStrategiesStructureFile) " +
-         "--audiopolicyengineconfigurationfile $(genDir)/audio_policy_engine_configuration.xml "+
-         "--productstrategiesstructurefile $(location :product_strategies_structure_template) " +
-         "--outputfile $(out)",
+    cmd: "cp $(locations :audio_policy_engine_configuration_files) $(genDir) && ls -l $(genDir) &&" +
+        "$(location buildStrategiesStructureFile) " +
+        "--audiopolicyengineconfigurationfile $(genDir)/audio_policy_engine_configuration.xml " +
+        "--productstrategiesstructurefile $(location :product_strategies_structure_template) " +
+        "--outputfile $(out)",
     srcs: [
         // The commented inputs must be provided to use this genrule_defaults
         // ":audio_policy_engine_configuration_files",
@@ -146,9 +147,9 @@
     name: "buildcommontypesstructurerule",
     tools: ["buildCommonTypesStructureFile"],
     cmd: "$(location buildCommonTypesStructureFile) " +
-         "--androidaudiobaseheader $(location :libaudio_system_audio_base) " +
-         "--commontypesstructure $(location :common_types_structure_template) " +
-         "--outputfile $(out)",
+        "--androidaudiobaseheader $(location :libaudio_system_audio_base) " +
+        "--commontypesstructure $(location :common_types_structure_template) " +
+        "--outputfile $(out)",
     srcs: [
         ":common_types_structure_template",
         ":libaudio_system_audio_base",
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.bp b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
index 0ef0b82..a897880 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.bp
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7d4ccab..98adff0 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/enginedefault/config/example/Android.bp b/services/audiopolicy/enginedefault/config/example/Android.bp
index 59a704b..f305c39 100644
--- a/services/audiopolicy/enginedefault/config/example/Android.bp
+++ b/services/audiopolicy/enginedefault/config/example/Android.bp
@@ -20,6 +20,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -38,16 +39,19 @@
         ":audio_policy_engine_product_strategies.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "phone/audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_stream_volumes.xml",
     vendor: true,
     src: "phone/audio_policy_engine_stream_volumes.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_default_stream_volumes.xml",
     vendor: true,
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
index fd240e3..fca02e4 100644
--- a/services/audiopolicy/fuzzer/Android.bp
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/fuzzer/aidl/Android.bp b/services/audiopolicy/fuzzer/aidl/Android.bp
index 38a2cde..8b37d36 100644
--- a/services/audiopolicy/fuzzer/aidl/Android.bp
+++ b/services/audiopolicy/fuzzer/aidl/Android.bp
@@ -16,6 +16,10 @@
  *
  ******************************************************************************/
 
+package {
+    default_team: "trendy_team_android_media_audio_framework",
+}
+
 cc_defaults {
     name: "audiopolicy_aidl_fuzzer_defaults",
     shared_libs: [
diff --git a/services/audiopolicy/fuzzer/resources/Android.bp b/services/audiopolicy/fuzzer/resources/Android.bp
index 22ee256..2a2b83b 100644
--- a/services/audiopolicy/fuzzer/resources/Android.bp
+++ b/services/audiopolicy/fuzzer/resources/Android.bp
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/managerdefault/Android.bp b/services/audiopolicy/managerdefault/Android.bp
index a1785da..2f46d48 100644
--- a/services/audiopolicy/managerdefault/Android.bp
+++ b/services/audiopolicy/managerdefault/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -22,6 +23,7 @@
     export_include_dirs: ["."],
 
     shared_libs: [
+        "com.android.media.audio-aconfig-cc",
         "libaudiofoundation",
         "libaudiopolicycomponents",
         "libcutils",
@@ -42,6 +44,9 @@
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "audioclient-types-aidl-cpp",
+        // Flag support
+        "android.media.audiopolicy-aconfig-cc",
+        "com.android.media.audioserver-aconfig-cc",
     ],
 
     header_libs: [
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 54a35b6..2761480 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -41,6 +41,9 @@
 
 #include <Serializer.h>
 #include <android/media/audio/common/AudioPort.h>
+#include <com_android_media_audio.h>
+#include <android_media_audiopolicy.h>
+#include <com_android_media_audioserver.h>
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
@@ -56,6 +59,9 @@
 
 namespace android {
 
+
+namespace audio_flags = android::media::audiopolicy;
+
 using android::media::audio::common::AudioDevice;
 using android::media::audio::common::AudioDeviceAddress;
 using android::media::audio::common::AudioPortDeviceExt;
@@ -781,7 +787,11 @@
         .ext.device.type = AUDIO_DEVICE_IN_TELEPHONY_RX, .ext.device.address = ""
     };
     const auto aa = mEngine->getAttributesForStreamType(AUDIO_STREAM_VOICE_CALL);
-    mCallRxSourceClient = startAudioSourceInternal(&source, &aa, 0/*uid*/);
+
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    status_t status = startAudioSource(&source, &aa, &portId, 0 /*uid*/, true /*internal*/);
+    ALOGE_IF(status != OK, "%s: failed to start audio source (%d)", __func__, status);
+    mCallRxSourceClient = mAudioSources.valueFor(portId);
     ALOGE_IF(mCallRxSourceClient == nullptr,
              "%s failed to start Telephony Rx AudioSource", __func__);
 }
@@ -814,9 +824,11 @@
 
     struct audio_port_config source = {};
     srcDevice->toAudioPortConfig(&source);
-    mCallTxSourceClient = new InternalSourceClientDescriptor(
-                callTxSourceClientPortId, mUidCached, aa, source, srcDevice, sinkDevice,
-                mCommunnicationStrategy, toVolumeSource(aa));
+    mCallTxSourceClient = new SourceClientDescriptor(
+                callTxSourceClientPortId, mUidCached, aa, source, srcDevice, AUDIO_STREAM_PATCH,
+                mCommunnicationStrategy, toVolumeSource(aa), true);
+    mCallTxSourceClient->setPreferredDeviceId(sinkDevice->getId());
+
     audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
     status_t status = connectAudioSourceToSink(
                 mCallTxSourceClient, sinkDevice, patchBuilder.patch(), patchHandle, mUidCached,
@@ -1043,11 +1055,11 @@
     sp<IOProfile> profile;
     for (const auto& hwModule : hwModules) {
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
-             if (!curProfile->isCompatibleProfile(devices,
+             if (curProfile->getCompatibilityScore(devices,
                      samplingRate, NULL /*updatedSamplingRate*/,
                      format, NULL /*updatedFormat*/,
                      channelMask, NULL /*updatedChannelMask*/,
-                     flags)) {
+                     flags) == IOProfile::NO_MATCH) {
                  continue;
              }
              // reject profiles not corresponding to a device currently available
@@ -1509,11 +1521,30 @@
     }
 
     if (!profile->canOpenNewIo()) {
+        if (!com::android::media::audioserver::direct_track_reprioritization()) {
+            return NAME_NOT_FOUND;
+        } else if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0) {
+            // MMAP gracefully handles lack of an exclusive track resource by mixing
+            // above the audio framework. For AAudio to know that the limit is reached,
+            // return an error.
+            return NAME_NOT_FOUND;
+        } else {
+            // Close outputs on this profile, if available, to free resources for this request
+            for (int i = 0; i < mOutputs.size() && !profile->canOpenNewIo(); i++) {
+                const auto desc = mOutputs.valueAt(i);
+                if (desc->mProfile == profile) {
+                    closeOutput(desc->mIoHandle);
+                }
+            }
+        }
+    }
+
+    // If closing outputs did not free the resources needed for this request, give up
+    if (!profile->canOpenNewIo()) {
         return NAME_NOT_FOUND;
     }
 
-    sp<SwAudioOutputDescriptor> outputDesc =
-            new SwAudioOutputDescriptor(profile, mpClientInterface);
+    auto outputDesc = sp<SwAudioOutputDescriptor>::make(profile, mpClientInterface);
 
     // An MSD patch may be using the only output stream that can service this request. Release
     // all MSD patches to prioritize this request over any active output on MSD.
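The reprioritization branch above is gated behind the com.android.media.audioserver aconfig flag and deliberately excluded for MMAP outputs, so AAudio exclusive clients still receive an error when the stream limit is hit. A condensed sketch of that decision with a hypothetical helper name; the flag accessor and flag constant are taken from the hunk above:

    // Requires <com_android_media_audioserver.h>, which this change already includes.
    static bool canReclaimOutputsFor(const sp<IOProfile>& profile) {
        if (!com::android::media::audioserver::direct_track_reprioritization()) {
            return false;  // flag off: keep the previous behavior (fail immediately)
        }
        // MMAP (AAudio exclusive) profiles must report the limit instead of reclaiming.
        return (profile->getFlags() & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0;
    }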
@@ -1606,9 +1637,13 @@
         *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_ULTRASOUND);
     }
 
+    // Use the spatializer output if the content can be spatialized, no preferred mixer
+    // was specified, and offload or direct playback is not explicitly requested.
     *isSpatialized = false;
     if (mSpatializerOutput != nullptr
-            && canBeSpatializedInt(attr, config, devices.toTypeAddrVector())) {
+            && canBeSpatializedInt(attr, config, devices.toTypeAddrVector())
+            && prefMixerConfigInfo == nullptr
+            && ((*flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0)) {
         *isSpatialized = true;
         return mSpatializerOutput->mIoHandle;
     }
@@ -3835,6 +3870,24 @@
     return res;
 }
 
+status_t AudioPolicyManager::getRegisteredPolicyMixes(std::vector<AudioMix>& _aidl_return) {
+    if (!audio_flags::audio_mix_test_api()) {
+        return INVALID_OPERATION;
+    }
+
+    _aidl_return.clear();
+    _aidl_return.reserve(mPolicyMixes.size());
+    for (const auto &policyMix: mPolicyMixes) {
+        _aidl_return.emplace_back(policyMix->mCriteria, policyMix->mMixType,
+                             policyMix->mFormat, policyMix->mRouteFlags, policyMix->mDeviceAddress,
+                             policyMix->mCbFlags);
+        _aidl_return.back().mDeviceType = policyMix->mDeviceType;
+    }
+
+    ALOGVV("%s() returning %zu registered mixes", __func__, _aidl_return->size());
+    return OK;
+}
+
 status_t AudioPolicyManager::updatePolicyMix(
             const AudioMix& mix,
             const std::vector<AudioMixMatchCriterion>& updatedCriteria) {
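getRegisteredPolicyMixes() above is a test-only query gated by the android.media.audiopolicy audio_mix_test_api flag; when the flag is off it returns INVALID_OPERATION before touching the output vector. A hedged caller sketch; apm stands for any AudioPolicyManager instance and is illustrative:

    std::vector<AudioMix> mixes;
    const status_t status = apm->getRegisteredPolicyMixes(mixes);
    if (status == OK) {
        ALOGD("audio policy has %zu registered mixes", mixes.size());
    } else {
        // INVALID_OPERATION: audio_mix_test_api is disabled in this build.
    }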
@@ -4456,11 +4509,11 @@
             outputDevices = getMsdAudioOutDevices();
         }
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
-            if (!curProfile->isCompatibleProfile(outputDevices,
+            if (curProfile->getCompatibilityScore(outputDevices,
                     config->sample_rate, nullptr /*updatedSamplingRate*/,
                     config->format, nullptr /*updatedFormat*/,
                     config->channel_mask, nullptr /*updatedChannelMask*/,
-                    flags)) {
+                    flags) == IOProfile::NO_MATCH) {
                 continue;
             }
             // reject profiles not corresponding to a device currently available
@@ -4566,15 +4619,17 @@
     for (const auto& hwModule : mHwModules) {
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
             if (curProfile->hasDynamicAudioProfile()
-                    && curProfile->isCompatibleProfile(devices,
-                                                       mixerAttributes->config.sample_rate,
-                                                       nullptr /*updatedSamplingRate*/,
-                                                       mixerAttributes->config.format,
-                                                       nullptr /*updatedFormat*/,
-                                                       mixerAttributes->config.channel_mask,
-                                                       nullptr /*updatedChannelMask*/,
-                                                       flags,
-                                                       false /*exactMatchRequiredForInputFlags*/)) {
+                    && curProfile->getCompatibilityScore(
+                            devices,
+                            mixerAttributes->config.sample_rate,
+                            nullptr /*updatedSamplingRate*/,
+                            mixerAttributes->config.format,
+                            nullptr /*updatedFormat*/,
+                            mixerAttributes->config.channel_mask,
+                            nullptr /*updatedChannelMask*/,
+                            flags,
+                            false /*exactMatchRequiredForInputFlags*/)
+                            != IOProfile::NO_MATCH) {
                 profile = curProfile;
                 break;
             }
@@ -4855,9 +4910,11 @@
     audio_attributes_t attributes = attributes_initializer(AUDIO_USAGE_MEDIA);
     const struct audio_port_config *source = &patch->sources[0];
     sp<SourceClientDescriptor> sourceDesc =
-            new InternalSourceClientDescriptor(
-                portId, uid, attributes, *source, srcDevice, sinkDevice,
-                mEngine->getProductStrategyForAttributes(attributes), toVolumeSource(attributes));
+            new SourceClientDescriptor(
+                portId, uid, attributes, *source, srcDevice, AUDIO_STREAM_PATCH,
+                mEngine->getProductStrategyForAttributes(attributes), toVolumeSource(attributes),
+                true);
+    sourceDesc->setPreferredDeviceId(sinkDevice->getId());
 
     status_t status =
             connectAudioSourceToSink(sourceDesc, sinkDevice, patch, *handle, uid, 0 /* delayMs */);
@@ -4977,14 +5034,15 @@
                 return BAD_VALUE;
             }
 
-            if (!outputDesc->mProfile->isCompatibleProfile(DeviceVector(devDesc),
-                                                           patch->sources[0].sample_rate,
-                                                           NULL,  // updatedSamplingRate
-                                                           patch->sources[0].format,
-                                                           NULL,  // updatedFormat
-                                                           patch->sources[0].channel_mask,
-                                                           NULL,  // updatedChannelMask
-                                                           AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
+            if (outputDesc->mProfile->getCompatibilityScore(
+                    DeviceVector(devDesc),
+                    patch->sources[0].sample_rate,
+                    nullptr,  // updatedSamplingRate
+                    patch->sources[0].format,
+                    nullptr,  // updatedFormat
+                    patch->sources[0].channel_mask,
+                    nullptr,  // updatedChannelMask
+                    AUDIO_OUTPUT_FLAG_NONE /*FIXME*/) == IOProfile::NO_MATCH) {
                 ALOGV("%s profile not supported for device %08x", __func__, devDesc->type());
                 return INVALID_OPERATION;
             }
@@ -5032,17 +5090,18 @@
                 return BAD_VALUE;
             }
 
-            if (!inputDesc->mProfile->isCompatibleProfile(DeviceVector(device),
-                                                          patch->sinks[0].sample_rate,
-                                                          NULL, /*updatedSampleRate*/
-                                                          patch->sinks[0].format,
-                                                          NULL, /*updatedFormat*/
-                                                          patch->sinks[0].channel_mask,
-                                                          NULL, /*updatedChannelMask*/
-                                                          // FIXME for the parameter type,
-                                                          // and the NONE
-                                                          (audio_output_flags_t)
-                                                            AUDIO_INPUT_FLAG_NONE)) {
+            if (inputDesc->mProfile->getCompatibilityScore(
+                    DeviceVector(device),
+                    patch->sinks[0].sample_rate,
+                    nullptr, /*updatedSampleRate*/
+                    patch->sinks[0].format,
+                    nullptr, /*updatedFormat*/
+                    patch->sinks[0].channel_mask,
+                    nullptr, /*updatedChannelMask*/
+                    // FIXME for the parameter type,
+                    // and the NONE
+                    (audio_output_flags_t)
+                    AUDIO_INPUT_FLAG_NONE) == IOProfile::NO_MATCH) {
                 return INVALID_OPERATION;
             }
             // TODO: reconfigure output format and channels here
@@ -5524,7 +5583,7 @@
 status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
                                               const audio_attributes_t *attributes,
                                               audio_port_handle_t *portId,
-                                              uid_t uid)
+                                              uid_t uid, bool internal)
 {
     ALOGV("%s", __FUNCTION__);
     *portId = AUDIO_PORT_HANDLE_NONE;
@@ -5557,7 +5616,7 @@
         new SourceClientDescriptor(*portId, uid, *attributes, *source, srcDevice,
                                    mEngine->getStreamTypeForAttributes(*attributes),
                                    mEngine->getProductStrategyForAttributes(*attributes),
-                                   toVolumeSource(*attributes));
+                                   toVolumeSource(*attributes), internal);
 
     status_t status = connectAudioSource(sourceDesc);
     if (status == NO_ERROR) {
@@ -5566,18 +5625,6 @@
     return status;
 }
 
-sp<SourceClientDescriptor> AudioPolicyManager::startAudioSourceInternal(
-        const struct audio_port_config *source, const audio_attributes_t *attributes, uid_t uid)
-{
-    ALOGV("%s", __FUNCTION__);
-    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
-
-    status_t status = startAudioSource(source, attributes, &portId, uid);
-    ALOGE_IF(status != OK, "%s: failed to start audio source (%d)", __func__, status);
-    return mAudioSources.valueFor(portId);
-}
-
-
 status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
     ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
@@ -5986,15 +6033,26 @@
     // The caller can have the audio config criteria ignored by either passing a null ptr or
     // the AUDIO_CONFIG_INITIALIZER value.
     // If an audio config is specified, current policy is to only allow spatialization for
-    // some positional channel masks and PCM format
+    // some positional channel masks and PCM formats, and for stereo only when low latency
+    // performance mode is not requested.
 
     if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
-        if (!audio_is_channel_mask_spatialized(config->channel_mask)) {
+        static const bool stereo_spatialization_enabled =
+                property_get_bool("ro.audio.stereo_spatialization_enabled", false);
+        const bool channel_mask_spatialized =
+                (stereo_spatialization_enabled && com_android_media_audio_stereo_spatialization())
+                ? audio_channel_mask_contains_stereo(config->channel_mask)
+                : audio_is_channel_mask_spatialized(config->channel_mask);
+        if (!channel_mask_spatialized) {
             return false;
         }
         if (!audio_is_linear_pcm(config->format)) {
             return false;
         }
+        if (config->channel_mask == AUDIO_CHANNEL_OUT_STEREO
+                && ((attr->flags & AUDIO_FLAG_LOW_LATENCY) != 0)) {
+            return false;
+        }
     }
 
     sp<IOProfile> profile =
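The spatialization gate added above is doubly protected: stereo masks are only considered when both the ro.audio.stereo_spatialization_enabled system property and the com.android.media.audio stereo_spatialization flag are enabled, and even then low-latency stereo content is excluded. A condensed restatement of the condition, using only identifiers that appear in the hunk:

    const bool stereoAllowed =
            property_get_bool("ro.audio.stereo_spatialization_enabled", false)
            && com_android_media_audio_stereo_spatialization();
    const bool maskOk = stereoAllowed
            ? audio_channel_mask_contains_stereo(config->channel_mask)
            : audio_is_channel_mask_spatialized(config->channel_mask);
    // maskOk alone is not sufficient: the format must still be linear PCM, and
    // AUDIO_CHANNEL_OUT_STEREO content with AUDIO_FLAG_LOW_LATENCY is rejected.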
@@ -6770,6 +6828,12 @@
         closingOutput->stop();
     }
     closingOutput->close();
+    if ((closingOutput->getFlags().output & AUDIO_OUTPUT_FLAG_BIT_PERFECT)
+            == AUDIO_OUTPUT_FLAG_BIT_PERFECT) {
+        for (const auto& device : closingOutput->devices()) {
+            device->setPreferredConfig(nullptr);
+        }
+    }
 
     removeOutput(output);
     mPreviousOutputs = mOutputs;
@@ -7659,9 +7723,6 @@
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
     // The flags can be ignored if they don't contain a must match flag.
-    //
-    // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
-    // the best matching profile, not the first one.
 
     using underlying_input_flag_t = std::underlying_type_t<audio_input_flags_t>;
     const underlying_input_flag_t mustMatchFlag = AUDIO_INPUT_FLAG_MMAP_NOIRQ |
@@ -7678,27 +7739,35 @@
             for (const auto& profile : hwModule->getInputProfiles()) {
                 // profile->log();
                 //updatedFormat = format;
-                if (profile->isCompatibleProfile(DeviceVector(device), samplingRate,
-                                                 &samplingRate  /*updatedSamplingRate*/,
-                                                 format,
-                                                 &format,       /*updatedFormat*/
-                                                 channelMask,
-                                                 &channelMask   /*updatedChannelMask*/,
-                                                 // FIXME ugly cast
-                                                 (audio_output_flags_t) flags,
-                                                 true /*exactMatchRequiredForInputFlags*/)) {
+                if (profile->getCompatibilityScore(
+                        DeviceVector(device),
+                        samplingRate,
+                        &updatedSamplingRate,
+                        format,
+                        &updatedFormat,
+                        channelMask,
+                        &updatedChannelMask,
+                        // FIXME ugly cast
+                        (audio_output_flags_t) flags,
+                        true /*exactMatchRequiredForInputFlags*/) == IOProfile::EXACT_MATCH) {
+                    samplingRate = updatedSamplingRate;
+                    format = updatedFormat;
+                    channelMask = updatedChannelMask;
                     return profile;
                 }
-                if (firstInexact == nullptr && profile->isCompatibleProfile(DeviceVector(device),
-                                                 samplingRate,
-                                                 &updatedSamplingRate,
-                                                 format,
-                                                 &updatedFormat,
-                                                 channelMask,
-                                                 &updatedChannelMask,
-                                                 // FIXME ugly cast
-                                                 (audio_output_flags_t) flags,
-                                                 false /*exactMatchRequiredForInputFlags*/)) {
+                if (firstInexact == nullptr
+                        && profile->getCompatibilityScore(
+                                DeviceVector(device),
+                                samplingRate,
+                                &updatedSamplingRate,
+                                format,
+                                &updatedFormat,
+                                channelMask,
+                                &updatedChannelMask,
+                                // FIXME ugly cast
+                                (audio_output_flags_t) flags,
+                                false /*exactMatchRequiredForInputFlags*/)
+                                != IOProfile::NO_MATCH) {
                     firstInexact = profile;
                 }
             }
@@ -8404,6 +8473,12 @@
         ALOGE("%s failed to open output %d", __func__, status);
         return nullptr;
     }
+    if ((flags & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_BIT_PERFECT) {
+        auto portConfig = desc->getConfig();
+        for (const auto& device : devices) {
+            device->setPreferredConfig(&portConfig);
+        }
+    }
 
     // Here is where the out_set_parameters() for card & device gets called
     sp<DeviceDescriptor> device = devices.getDeviceForOpening();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 61be09f..a3232a2 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -292,6 +292,7 @@
 
         virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes);
         virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
+        virtual status_t getRegisteredPolicyMixes(std::vector<AudioMix>& mixes) override;
         virtual status_t updatePolicyMix(
                 const AudioMix& mix,
                 const std::vector<AudioMixMatchCriterion>& updatedCriteria) override;
@@ -339,7 +340,8 @@
         virtual status_t startAudioSource(const struct audio_port_config *source,
                                           const audio_attributes_t *attributes,
                                           audio_port_handle_t *portId,
-                                          uid_t uid);
+                                          uid_t uid,
+                                          bool internal = false);
         virtual status_t stopAudioSource(audio_port_handle_t portId);
 
         virtual status_t setMasterMono(bool mono);
@@ -1055,9 +1057,6 @@
         bool isMsdPatch(const audio_patch_handle_t &handle) const;
 
 private:
-        sp<SourceClientDescriptor> startAudioSourceInternal(
-                const struct audio_port_config *source, const audio_attributes_t *attributes,
-                uid_t uid);
 
         void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
 
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index fb55225..cddbf39 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -52,7 +53,7 @@
     static_libs: [
         "libeffectsconfig",
         "libaudiopolicycomponents",
-    ]
+    ],
 }
 
 cc_library {
@@ -75,10 +76,9 @@
     ],
 
     include_dirs: [
-        "frameworks/av/services/audioflinger"
+        "frameworks/av/services/audioflinger",
     ],
 
-
     static_libs: [
         "framework-permission-aidl-cpp",
     ],
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 85b7ad9..71edd57 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -42,15 +42,19 @@
 // ----------------------------------------------------------------------------
 
 AudioPolicyEffects::AudioPolicyEffects(const sp<EffectsFactoryHalInterface>& effectsFactoryHal) {
+    // Note: clang thread-safety permits the ctor to call guarded _l methods without
+    // acquiring the associated mutex capability, as standard practice is to assume
+    // single-threaded construction and destruction.
+
     // load xml config with effectsFactoryHal
-    status_t loadResult = loadAudioEffectConfig(effectsFactoryHal);
+    status_t loadResult = loadAudioEffectConfig_ll(effectsFactoryHal);
     if (loadResult < 0) {
         ALOGW("Failed to query effect configuration, fallback to load .conf");
         // load automatic audio effect modules
         if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
-            loadAudioEffectConfigLegacy(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
+            loadAudioEffectConfigLegacy_l(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
         } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
-            loadAudioEffectConfigLegacy(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
+            loadAudioEffectConfigLegacy_l(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
         }
     } else if (loadResult > 0) {
         ALOGE("Effect config is partially invalid, skipped %d elements", loadResult);
@@ -62,35 +66,6 @@
                 std::launch::async, &AudioPolicyEffects::initDefaultDeviceEffects, this);
 }
 
-AudioPolicyEffects::~AudioPolicyEffects()
-{
-    size_t i = 0;
-    // release audio input processing resources
-    for (i = 0; i < mInputSources.size(); i++) {
-        delete mInputSources.valueAt(i);
-    }
-    mInputSources.clear();
-
-    for (i = 0; i < mInputSessions.size(); i++) {
-        mInputSessions.valueAt(i)->mEffects.clear();
-        delete mInputSessions.valueAt(i);
-    }
-    mInputSessions.clear();
-
-    // release audio output processing resources
-    for (i = 0; i < mOutputStreams.size(); i++) {
-        delete mOutputStreams.valueAt(i);
-    }
-    mOutputStreams.clear();
-
-    for (i = 0; i < mOutputSessions.size(); i++) {
-        mOutputSessions.valueAt(i)->mEffects.clear();
-        delete mOutputSessions.valueAt(i);
-    }
-    mOutputSessions.clear();
-}
-
-
 status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input,
                              audio_source_t inputSource,
                              audio_session_t audioSession)
@@ -101,48 +76,43 @@
     audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ?
                                     AUDIO_SOURCE_VOICE_RECOGNITION : inputSource;
 
-    Mutex::Autolock _l(mLock);
-    ssize_t index = mInputSources.indexOfKey(aliasSource);
-    if (index < 0) {
+    audio_utils::lock_guard _l(mMutex);
+    auto sourceIt = mInputSources.find(aliasSource);
+    if (sourceIt == mInputSources.end()) {
         ALOGV("addInputEffects(): no processing needs to be attached to this source");
         return status;
     }
-    ssize_t idx = mInputSessions.indexOfKey(audioSession);
-    EffectVector *sessionDesc;
-    if (idx < 0) {
-        sessionDesc = new EffectVector(audioSession);
-        mInputSessions.add(audioSession, sessionDesc);
-    } else {
-        // EffectVector is existing and we just need to increase ref count
-        sessionDesc = mInputSessions.valueAt(idx);
+    std::shared_ptr<EffectVector>& sessionDesc = mInputSessions[audioSession];
+    if (sessionDesc == nullptr) {
+        sessionDesc = std::make_shared<EffectVector>(audioSession);
     }
     sessionDesc->mRefCount++;
 
     ALOGV("addInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
     if (sessionDesc->mRefCount == 1) {
         int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
-        for (size_t i = 0; i < effects.size(); i++) {
-            EffectDesc *effect = effects[i];
+        const std::shared_ptr<EffectDescVector>& effects = sourceIt->second;
+        for (const std::shared_ptr<EffectDesc>& effect : *effects) {
             AttributionSourceState attributionSource;
             attributionSource.packageName = "android";
             attributionSource.token = sp<BBinder>::make();
-            sp<AudioEffect> fx = new AudioEffect(attributionSource);
+            auto fx = sp<AudioEffect>::make(attributionSource);
             fx->set(nullptr /*type */, &effect->mUuid, -1 /* priority */, nullptr /* callback */,
                     audioSession, input);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGW("addInputEffects(): failed to create Fx %s on source %d",
-                      effect->mName, (int32_t)aliasSource);
+                      effect->mName.c_str(), (int32_t)aliasSource);
                 // fx goes out of scope and strong ref on AudioEffect is released
                 continue;
             }
             for (size_t j = 0; j < effect->mParams.size(); j++) {
-                fx->setParameter(effect->mParams[j]);
+                // const_cast here due to API.
+                fx->setParameter(const_cast<effect_param_t*>(effect->mParams[j].get()));
             }
             ALOGV("addInputEffects(): added Fx %s on source: %d",
-                  effect->mName, (int32_t)aliasSource);
-            sessionDesc->mEffects.add(fx);
+                  effect->mName.c_str(), (int32_t)aliasSource);
+            sessionDesc->mEffects.push_back(std::move(fx));
         }
         sessionDesc->setProcessorEnabled(true);
         IPCThreadState::self()->restoreCallingIdentity(token);
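
The rewritten addInputEffects() relies on std::map::operator[] value-initializing the mapped shared_ptr, so a null check is enough to detect the first user of a session. A minimal sketch of that get-or-create plus refcount idiom (SessionDesc and acquire are simplified stand-ins for the EffectVector handling):

    #include <map>
    #include <memory>

    struct SessionDesc {
        int refCount = 0;
    };

    std::map<int, std::shared_ptr<SessionDesc>> sessions;

    SessionDesc& acquire(int sessionId) {
        std::shared_ptr<SessionDesc>& slot = sessions[sessionId];  // creates a null slot if absent
        if (slot == nullptr) {
            slot = std::make_shared<SessionDesc>();                // first user of this session
        }
        slot->refCount++;
        return *slot;
    }
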
@@ -156,18 +126,17 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
-    ssize_t index = mInputSessions.indexOfKey(audioSession);
-    if (index < 0) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mInputSessions.find(audioSession);
+    if (it == mInputSessions.end()) {
         return status;
     }
-    EffectVector *sessionDesc = mInputSessions.valueAt(index);
+    std::shared_ptr<EffectVector> sessionDesc = it->second;
     sessionDesc->mRefCount--;
     ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
     if (sessionDesc->mRefCount == 0) {
         sessionDesc->setProcessorEnabled(false);
-        delete sessionDesc;
-        mInputSessions.removeItemsAt(index);
+        mInputSessions.erase(it);
         ALOGV("releaseInputEffects(): all effects released");
     }
     return status;
@@ -179,24 +148,16 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
-    size_t index;
-    for (index = 0; index < mInputSessions.size(); index++) {
-        if (mInputSessions.valueAt(index)->mSessionId == audioSession) {
-            break;
-        }
-    }
-    if (index == mInputSessions.size()) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mInputSessions.find(audioSession);
+    if (it == mInputSessions.end()) {
         *count = 0;
         return BAD_VALUE;
     }
-    Vector< sp<AudioEffect> > effects = mInputSessions.valueAt(index)->mEffects;
-
-    for (size_t i = 0; i < effects.size(); i++) {
-        effect_descriptor_t desc = effects[i]->descriptor();
-        if (i < *count) {
-            descriptors[i] = desc;
-        }
+    const std::vector<sp<AudioEffect>>& effects = it->second->mEffects;
+    const size_t copysize = std::min(effects.size(), (size_t)*count);
+    for (size_t i = 0; i < copysize; i++) {
+        descriptors[i] = effects[i]->descriptor();
     }
     if (effects.size() > *count) {
         status = NO_MEMORY;
@@ -212,24 +173,16 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
-    size_t index;
-    for (index = 0; index < mOutputSessions.size(); index++) {
-        if (mOutputSessions.valueAt(index)->mSessionId == audioSession) {
-            break;
-        }
-    }
-    if (index == mOutputSessions.size()) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mOutputSessions.find(audioSession);
+    if (it == mOutputSessions.end()) {
         *count = 0;
         return BAD_VALUE;
     }
-    Vector< sp<AudioEffect> > effects = mOutputSessions.valueAt(index)->mEffects;
-
-    for (size_t i = 0; i < effects.size(); i++) {
-        effect_descriptor_t desc = effects[i]->descriptor();
-        if (i < *count) {
-            descriptors[i] = desc;
-        }
+    const std::vector<sp<AudioEffect>>& effects = it->second->mEffects;
+    const size_t copysize = std::min(effects.size(), (size_t)*count);
+    for (size_t i = 0; i < copysize; i++) {
+        descriptors[i] = effects[i]->descriptor();
     }
     if (effects.size() > *count) {
         status = NO_MEMORY;
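
Both query helpers above now copy at most *count descriptors and flag truncation with NO_MEMORY. A generic sketch of the idiom (copyCounted is an illustrative helper; the caller maps the truncation flag to NO_MEMORY):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Copy at most count items into dst; return true if src had more than fit.
    template <typename T>
    bool copyCounted(const std::vector<T>& src, T* dst, uint32_t count) {
        const size_t copysize = std::min(src.size(), static_cast<size_t>(count));
        for (size_t i = 0; i < copysize; ++i) {
            dst[i] = src[i];
        }
        return src.size() > count;   // truncated: caller can report NO_MEMORY
    }
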
@@ -245,27 +198,22 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     // create audio processors according to stream
     // FIXME: should we have specific post processing settings for internal streams?
     // default to media for now.
     if (stream >= AUDIO_STREAM_PUBLIC_CNT) {
         stream = AUDIO_STREAM_MUSIC;
     }
-    ssize_t index = mOutputStreams.indexOfKey(stream);
-    if (index < 0) {
+    auto it = mOutputStreams.find(stream);
+    if (it == mOutputStreams.end()) {
         ALOGV("addOutputSessionEffects(): no output processing needed for this stream");
         return NO_ERROR;
     }
 
-    ssize_t idx = mOutputSessions.indexOfKey(audioSession);
-    EffectVector *procDesc;
-    if (idx < 0) {
-        procDesc = new EffectVector(audioSession);
-        mOutputSessions.add(audioSession, procDesc);
-    } else {
-        // EffectVector is existing and we just need to increase ref count
-        procDesc = mOutputSessions.valueAt(idx);
+    std::shared_ptr<EffectVector>& procDesc = mOutputSessions[audioSession];
+    if (procDesc == nullptr) {
+        procDesc = std::make_shared<EffectVector>(audioSession);
     }
     procDesc->mRefCount++;
 
@@ -274,25 +222,24 @@
     if (procDesc->mRefCount == 1) {
         // make sure effects are associated to audio server even if we are executing a binder call
         int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
-        for (size_t i = 0; i < effects.size(); i++) {
-            EffectDesc *effect = effects[i];
+        const std::shared_ptr<EffectDescVector>& effects = it->second;
+        for (const std::shared_ptr<EffectDesc>& effect : *effects) {
             AttributionSourceState attributionSource;
             attributionSource.packageName = "android";
             attributionSource.token = sp<BBinder>::make();
-            sp<AudioEffect> fx = new AudioEffect(attributionSource);
+            auto fx = sp<AudioEffect>::make(attributionSource);
             fx->set(nullptr /* type */, &effect->mUuid, 0 /* priority */, nullptr /* callback */,
                     audioSession, output);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGE("addOutputSessionEffects(): failed to create Fx  %s on session %d",
-                      effect->mName, audioSession);
+                      effect->mName.c_str(), audioSession);
                 // fx goes out of scope and strong ref on AudioEffect is released
                 continue;
             }
             ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d",
-                  effect->mName, audioSession, (int32_t)stream);
-            procDesc->mEffects.add(fx);
+                  effect->mName.c_str(), audioSession, (int32_t)stream);
+            procDesc->mEffects.push_back(std::move(fx));
         }
 
         procDesc->setProcessorEnabled(true);
@@ -305,30 +252,28 @@
                          audio_stream_type_t stream,
                          audio_session_t audioSession)
 {
-    status_t status = NO_ERROR;
     (void) output; // argument not used for now
     (void) stream; // argument not used for now
 
-    Mutex::Autolock _l(mLock);
-    ssize_t index = mOutputSessions.indexOfKey(audioSession);
-    if (index < 0) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mOutputSessions.find(audioSession);
+    if (it == mOutputSessions.end()) {
         ALOGV("releaseOutputSessionEffects: no output processing was attached to this stream");
         return NO_ERROR;
     }
 
-    EffectVector *procDesc = mOutputSessions.valueAt(index);
+    std::shared_ptr<EffectVector> procDesc = it->second;
     procDesc->mRefCount--;
     ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d",
           audioSession, procDesc->mRefCount);
     if (procDesc->mRefCount == 0) {
         procDesc->setProcessorEnabled(false);
         procDesc->mEffects.clear();
-        delete procDesc;
-        mOutputSessions.removeItemsAt(index);
+        mOutputSessions.erase(it);
         ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
               audioSession);
     }
-    return status;
+    return NO_ERROR;
 }
 
 status_t AudioPolicyEffects::addSourceDefaultEffect(const effect_uuid_t *type,
@@ -370,17 +315,12 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Find the EffectDescVector for the given source type, or create a new one if necessary.
-    ssize_t index = mInputSources.indexOfKey(source);
-    EffectDescVector *desc = NULL;
-    if (index < 0) {
-        // No effects for this source type yet.
-        desc = new EffectDescVector();
-        mInputSources.add(source, desc);
-    } else {
-        desc = mInputSources.valueAt(index);
+    std::shared_ptr<EffectDescVector>& desc = mInputSources[source];
+    if (desc == nullptr) {
+        desc = std::make_shared<EffectDescVector>();
     }
 
     // Create a new effect and add it to the vector.
@@ -389,9 +329,9 @@
         ALOGE("addSourceDefaultEffect(): failed to get new unique id.");
         return res;
     }
-    EffectDesc *effect = new EffectDesc(
+    std::shared_ptr<EffectDesc> effect = std::make_shared<EffectDesc>(
             descriptor.name, descriptor.type, opPackageName, descriptor.uuid, priority, *id);
-    desc->mEffects.add(effect);
+    desc->push_back(std::move(effect));
     // TODO(b/71813697): Support setting params as well.
 
     // TODO(b/71814300): Retroactively attach to any existing sources of the given type.
@@ -435,17 +375,13 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Find the EffectDescVector for the given stream type, or create a new one if necessary.
-    ssize_t index = mOutputStreams.indexOfKey(stream);
-    EffectDescVector *desc = NULL;
-    if (index < 0) {
+    std::shared_ptr<EffectDescVector>& desc = mOutputStreams[stream];
+    if (desc == nullptr) {
         // No effects for this stream type yet.
-        desc = new EffectDescVector();
-        mOutputStreams.add(stream, desc);
-    } else {
-        desc = mOutputStreams.valueAt(index);
+        desc = std::make_shared<EffectDescVector>();
     }
 
     // Create a new effect and add it to the vector.
@@ -454,9 +390,9 @@
         ALOGE("addStreamDefaultEffect(): failed to get new unique id.");
         return res;
     }
-    EffectDesc *effect = new EffectDesc(
+    std::shared_ptr<EffectDesc> effect = std::make_shared<EffectDesc>(
             descriptor.name, descriptor.type, opPackageName, descriptor.uuid, priority, *id);
-    desc->mEffects.add(effect);
+    desc->push_back(std::move(effect));
     // TODO(b/71813697): Support setting params as well.
 
     // TODO(b/71814300): Retroactively attach to any existing streams of the given type.
@@ -475,18 +411,16 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Check each source type.
-    size_t numSources = mInputSources.size();
-    for (size_t i = 0; i < numSources; ++i) {
+    for (auto& [source, descVector] : mInputSources) {
         // Check each effect for each source.
-        EffectDescVector* descVector = mInputSources[i];
-        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+        for (auto desc = descVector->begin(); desc != descVector->end(); ++desc) {
             if ((*desc)->mId == id) {
                 // Found it!
                 // TODO(b/71814300): Remove from any sources the effect was attached to.
-                descVector->mEffects.erase(desc);
+                descVector->erase(desc);
                 // Handles are unique; there can only be one match, so return early.
                 return NO_ERROR;
             }
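
The removal loops above switch to structured bindings over the std::map and erase through the inner vector's iterator, returning as soon as the unique id is found. A small stand-alone sketch (Desc and removeById are illustrative):

    #include <map>
    #include <memory>
    #include <vector>

    struct Desc { int id; };

    std::map<int, std::shared_ptr<std::vector<Desc>>> table;

    bool removeById(int id) {
        for (auto& [key, descs] : table) {
            for (auto it = descs->begin(); it != descs->end(); ++it) {
                if (it->id == id) {
                    descs->erase(it);   // ids are unique, so stop at the first match
                    return true;
                }
            }
        }
        return false;
    }
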
@@ -506,18 +440,16 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Check each stream type.
-    size_t numStreams = mOutputStreams.size();
-    for (size_t i = 0; i < numStreams; ++i) {
+    for (auto& [stream, descVector] : mOutputStreams) {
         // Check each effect for each stream.
-        EffectDescVector* descVector = mOutputStreams[i];
-        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+        for (auto desc = descVector->begin(); desc != descVector->end(); ++desc) {
             if ((*desc)->mId == id) {
                 // Found it!
                 // TODO(b/71814300): Remove from any streams the effect was attached to.
-                descVector->mEffects.erase(desc);
+                descVector->erase(desc);
                 // Handles are unique; there can only be one match, so return early.
                 return NO_ERROR;
             }
@@ -530,8 +462,8 @@
 
 void AudioPolicyEffects::EffectVector::setProcessorEnabled(bool enabled)
 {
-    for (size_t i = 0; i < mEffects.size(); i++) {
-        mEffects.itemAt(i)->setEnabled(enabled);
+    for (const auto& effect : mEffects) {
+        effect->setEnabled(enabled);
     }
 }
 
@@ -540,7 +472,8 @@
 // Audio processing configuration
 // ----------------------------------------------------------------------------
 
-/*static*/ const char * const AudioPolicyEffects::kInputSourceNames[AUDIO_SOURCE_CNT -1] = {
+// We keep const char* rather than std::string_view as comparison is believed to be faster.
+constexpr const char* kInputSourceNames[AUDIO_SOURCE_CNT - 1] = {
     MIC_SRC_TAG,
     VOICE_UL_SRC_TAG,
     VOICE_DL_SRC_TAG,
@@ -567,7 +500,8 @@
     return (audio_source_t)i;
 }
 
-const char *AudioPolicyEffects::kStreamNames[AUDIO_STREAM_PUBLIC_CNT+1] = {
+// +1 as enum starts from -1
+constexpr const char* kStreamNames[AUDIO_STREAM_PUBLIC_CNT + 1] = {
     AUDIO_STREAM_DEFAULT_TAG,
     AUDIO_STREAM_VOICE_CALL_TAG,
     AUDIO_STREAM_SYSTEM_TAG,
@@ -584,6 +518,7 @@
 
 // returns the audio_stream_t enum corresponding to the output stream name or
 // AUDIO_STREAM_PUBLIC_CNT is no match found
+/* static */
 audio_stream_type_t AudioPolicyEffects::streamNameToEnum(const char *name)
 {
     int i;
@@ -600,6 +535,7 @@
 // Audio Effect Config parser
 // ----------------------------------------------------------------------------
 
+/* static */
 size_t AudioPolicyEffects::growParamSize(char **param,
                                          size_t size,
                                          size_t *curSize,
@@ -623,7 +559,7 @@
     return pos;
 }
 
-
+/* static */
 size_t AudioPolicyEffects::readParamValue(cnode *node,
                                           char **param,
                                           size_t *curSize,
@@ -692,7 +628,8 @@
     return len;
 }
 
-effect_param_t *AudioPolicyEffects::loadEffectParameter(cnode *root)
+/* static */
+std::shared_ptr<const effect_param_t> AudioPolicyEffects::loadEffectParameter(cnode* root)
 {
     cnode *param;
     cnode *value;
@@ -722,7 +659,7 @@
             *ptr = atoi(param->value);
             fx_param->psize = sizeof(int);
             fx_param->vsize = sizeof(int);
-            return fx_param;
+            return {fx_param, free};
         }
     }
     if (param == NULL || value == NULL) {
@@ -760,42 +697,43 @@
         value = value->next;
     }
 
-    return fx_param;
+    return {fx_param, free};
 
 error:
     free(fx_param);
     return NULL;
 }
 
-void AudioPolicyEffects::loadEffectParameters(cnode *root, Vector <effect_param_t *>& params)
+/* static */
+void AudioPolicyEffects::loadEffectParameters(
+        cnode* root, std::vector<std::shared_ptr<const effect_param_t>>& params)
 {
     cnode *node = root->first_child;
     while (node) {
         ALOGV("loadEffectParameters() loading param %s", node->name);
-        effect_param_t *param = loadEffectParameter(node);
-        if (param != NULL) {
-            params.add(param);
+        const auto param = loadEffectParameter(node);
+        if (param != nullptr) {
+            params.push_back(param);
         }
         node = node->next;
     }
 }
 
-
-AudioPolicyEffects::EffectDescVector *AudioPolicyEffects::loadEffectConfig(
-                                                            cnode *root,
-                                                            const Vector <EffectDesc *>& effects)
+/* static */
+std::shared_ptr<AudioPolicyEffects::EffectDescVector> AudioPolicyEffects::loadEffectConfig(
+        cnode* root, const EffectDescVector& effects)
 {
     cnode *node = root->first_child;
     if (node == NULL) {
         ALOGW("loadInputSource() empty element %s", root->name);
         return NULL;
     }
-    EffectDescVector *desc = new EffectDescVector();
+    auto desc = std::make_shared<EffectDescVector>();
     while (node) {
         size_t i;
 
         for (i = 0; i < effects.size(); i++) {
-            if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) {
+            if (effects[i]->mName == node->name) {
                 ALOGV("loadEffectConfig() found effect %s in list", node->name);
                 break;
             }
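
In the hunk above, loadEffectParameter() now hands the malloc'd effect_param_t back as a std::shared_ptr<const effect_param_t> whose deleter is free(), so ownership is shared safely across deep-copied EffectDesc objects. A minimal sketch of that pattern (params_t and makeParams are simplified stand-ins):

    #include <cstdlib>
    #include <memory>

    struct params_t { int psize; int vsize; };   // stand-in for effect_param_t

    std::shared_ptr<const params_t> makeParams() {
        auto* p = static_cast<params_t*>(calloc(1, sizeof(params_t)));
        if (p == nullptr) return nullptr;
        p->psize = sizeof(int);
        p->vsize = sizeof(int);
        return {p, free};   // free() runs when the last shared_ptr reference is dropped
    }
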
@@ -805,23 +743,22 @@
             node = node->next;
             continue;
         }
-        EffectDesc *effect = new EffectDesc(*effects[i]);   // deep copy
+        auto effect = std::make_shared<EffectDesc>(*effects[i]);   // deep copy
         loadEffectParameters(node, effect->mParams);
         ALOGV("loadEffectConfig() adding effect %s uuid %08x",
-              effect->mName, effect->mUuid.timeLow);
-        desc->mEffects.add(effect);
+              effect->mName.c_str(), effect->mUuid.timeLow);
+        desc->push_back(std::move(effect));
         node = node->next;
     }
-    if (desc->mEffects.size() == 0) {
+    if (desc->empty()) {
         ALOGW("loadEffectConfig() no valid effects found in config %s", root->name);
-        delete desc;
-        return NULL;
+        return nullptr;
     }
     return desc;
 }
 
-status_t AudioPolicyEffects::loadInputEffectConfigurations(cnode *root,
-                                                           const Vector <EffectDesc *>& effects)
+status_t AudioPolicyEffects::loadInputEffectConfigurations_l(cnode* root,
+        const EffectDescVector& effects)
 {
     cnode *node = config_find(root, PREPROCESSING_TAG);
     if (node == NULL) {
@@ -831,24 +768,24 @@
     while (node) {
         audio_source_t source = inputSourceNameToEnum(node->name);
         if (source == AUDIO_SOURCE_CNT) {
-            ALOGW("loadInputSources() invalid input source %s", node->name);
+            ALOGW("%s() invalid input source %s", __func__, node->name);
             node = node->next;
             continue;
         }
-        ALOGV("loadInputSources() loading input source %s", node->name);
-        EffectDescVector *desc = loadEffectConfig(node, effects);
+        ALOGV("%s() loading input source %s", __func__, node->name);
+        auto desc = loadEffectConfig(node, effects);
         if (desc == NULL) {
             node = node->next;
             continue;
         }
-        mInputSources.add(source, desc);
+        mInputSources[source] = std::move(desc);
         node = node->next;
     }
     return NO_ERROR;
 }
 
-status_t AudioPolicyEffects::loadStreamEffectConfigurations(cnode *root,
-                                                            const Vector <EffectDesc *>& effects)
+status_t AudioPolicyEffects::loadStreamEffectConfigurations_l(cnode* root,
+        const EffectDescVector& effects)
 {
     cnode *node = config_find(root, OUTPUT_SESSION_PROCESSING_TAG);
     if (node == NULL) {
@@ -858,23 +795,24 @@
     while (node) {
         audio_stream_type_t stream = streamNameToEnum(node->name);
         if (stream == AUDIO_STREAM_PUBLIC_CNT) {
-            ALOGW("loadStreamEffectConfigurations() invalid output stream %s", node->name);
+            ALOGW("%s() invalid output stream %s", __func__, node->name);
             node = node->next;
             continue;
         }
-        ALOGV("loadStreamEffectConfigurations() loading output stream %s", node->name);
-        EffectDescVector *desc = loadEffectConfig(node, effects);
+        ALOGV("%s() loading output stream %s", __func__, node->name);
+        std::shared_ptr<EffectDescVector> desc = loadEffectConfig(node, effects);
         if (desc == NULL) {
             node = node->next;
             continue;
         }
-        mOutputStreams.add(stream, desc);
+        mOutputStreams[stream] = std::move(desc);
         node = node->next;
     }
     return NO_ERROR;
 }
 
-AudioPolicyEffects::EffectDesc *AudioPolicyEffects::loadEffect(cnode *root)
+/* static */
+std::shared_ptr<AudioPolicyEffects::EffectDesc> AudioPolicyEffects::loadEffect(cnode* root)
 {
     cnode *node = config_find(root, UUID_TAG);
     if (node == NULL) {
@@ -885,30 +823,33 @@
         ALOGW("loadEffect() invalid uuid %s", node->value);
         return NULL;
     }
-    return new EffectDesc(root->name, uuid);
+    return std::make_shared<EffectDesc>(root->name, uuid);
 }
 
-status_t AudioPolicyEffects::loadEffects(cnode *root, Vector <EffectDesc *>& effects)
+/* static */
+android::AudioPolicyEffects::EffectDescVector AudioPolicyEffects::loadEffects(cnode *root)
 {
+    EffectDescVector effects;
     cnode *node = config_find(root, EFFECTS_TAG);
     if (node == NULL) {
-        return -ENOENT;
+        ALOGW("%s() Cannot find %s configuration", __func__, EFFECTS_TAG);
+        return effects;
     }
     node = node->first_child;
     while (node) {
         ALOGV("loadEffects() loading effect %s", node->name);
-        EffectDesc *effect = loadEffect(node);
+        auto effect = loadEffect(node);
         if (effect == NULL) {
             node = node->next;
             continue;
         }
-        effects.add(effect);
+        effects.push_back(std::move(effect));
         node = node->next;
     }
-    return NO_ERROR;
+    return effects;
 }
 
-status_t AudioPolicyEffects::loadAudioEffectConfig(
+status_t AudioPolicyEffects::loadAudioEffectConfig_ll(
         const sp<EffectsFactoryHalInterface>& effectsFactoryHal) {
     if (!effectsFactoryHal) {
         ALOGE("%s Null EffectsFactoryHalInterface", __func__);
@@ -924,11 +865,12 @@
 
     auto loadProcessingChain = [](auto& processingChain, auto& streams) {
         for (auto& stream : processingChain) {
-            auto effectDescs = std::make_unique<EffectDescVector>();
+            auto effectDescs = std::make_shared<EffectDescVector>();
             for (auto& effect : stream.effects) {
-                effectDescs->mEffects.add(new EffectDesc{effect->name.c_str(), effect->uuid});
+                effectDescs->push_back(
+                        std::make_shared<EffectDesc>(effect->name, effect->uuid));
             }
-            streams.add(stream.type, effectDescs.release());
+            streams[stream.type] = std::move(effectDescs);
         }
     };
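
loadProcessingChain above is a generic lambda: the auto& parameters let one body fill both the source-keyed and stream-keyed maps. A stand-alone sketch of the same shape (the enum, entry, and map types are simplified assumptions):

    #include <map>
    #include <string>
    #include <vector>

    enum class Source { MIC };
    enum class Stream { MUSIC };

    struct PreEntry  { Source type; std::vector<std::string> effects; };
    struct PostEntry { Stream type; std::vector<std::string> effects; };

    int main() {
        std::map<Source, std::vector<std::string>> inputSources;
        std::map<Stream, std::vector<std::string>> outputStreams;

        // One lambda, two differently keyed destinations: auto& keeps it generic.
        auto load = [](auto& chain, auto& out) {
            for (auto& entry : chain) {
                out[entry.type] = entry.effects;
            }
        };

        std::vector<PreEntry>  pre{{Source::MIC, {"aec", "ns"}}};
        std::vector<PostEntry> post{{Stream::MUSIC, {"eq"}}};
        load(pre, inputSources);
        load(post, outputStreams);
        return 0;
    }
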
 
@@ -936,26 +878,26 @@
         for (auto& deviceProcess : processingChain) {
             auto effectDescs = std::make_unique<EffectDescVector>();
             for (auto& effect : deviceProcess.effects) {
-                effectDescs->mEffects.add(new EffectDesc{effect->name.c_str(), effect->uuid});
+                effectDescs->push_back(
+                        std::make_shared<EffectDesc>(effect->name, effect->uuid));
             }
-            auto deviceEffects = std::make_unique<DeviceEffects>(
+            auto devEffects = std::make_unique<DeviceEffects>(
                         std::move(effectDescs), deviceProcess.type, deviceProcess.address);
-            devicesEffects.emplace(deviceProcess.address, std::move(deviceEffects));
+            devicesEffects.emplace(deviceProcess.address, std::move(devEffects));
         }
     };
 
+    // access to mInputSources and mOutputStreams requires mMutex.
     loadProcessingChain(processings->preprocess, mInputSources);
     loadProcessingChain(processings->postprocess, mOutputStreams);
 
-    {
-        Mutex::Autolock _l(mLock);
-        loadDeviceProcessingChain(processings->deviceprocess, mDeviceEffects);
-    }
+    // access to mDeviceEffects requires mDeviceEffectsMutex
+    loadDeviceProcessingChain(processings->deviceprocess, mDeviceEffects);
 
     return skippedElements;
 }
 
-status_t AudioPolicyEffects::loadAudioEffectConfigLegacy(const char *path)
+status_t AudioPolicyEffects::loadAudioEffectConfigLegacy_l(const char *path)
 {
     cnode *root;
     char *data;
@@ -967,15 +909,11 @@
     root = config_node("", "");
     config_load(root, data);
 
-    Vector <EffectDesc *> effects;
-    loadEffects(root, effects);
-    loadInputEffectConfigurations(root, effects);
-    loadStreamEffectConfigurations(root, effects);
+    const EffectDescVector effects = loadEffects(root);
 
-    for (size_t i = 0; i < effects.size(); i++) {
-        delete effects[i];
-    }
-
+    // requires mMutex
+    loadInputEffectConfigurations_l(root, effects);
+    loadStreamEffectConfigurations_l(root, effects);
     config_free(root);
     free(root);
     free(data);
@@ -985,14 +923,14 @@
 
 void AudioPolicyEffects::initDefaultDeviceEffects()
 {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mDeviceEffectsMutex);
     for (const auto& deviceEffectsIter : mDeviceEffects) {
         const auto& deviceEffects =  deviceEffectsIter.second;
-        for (const auto& effectDesc : deviceEffects->mEffectDescriptors->mEffects) {
+        for (const auto& effectDesc : *deviceEffects->mEffectDescriptors) {
             AttributionSourceState attributionSource;
             attributionSource.packageName = "android";
             attributionSource.token = sp<BBinder>::make();
-            sp<AudioEffect> fx = new AudioEffect(attributionSource);
+            sp<AudioEffect> fx = sp<AudioEffect>::make(attributionSource);
             fx->set(EFFECT_UUID_NULL, &effectDesc->mUuid, 0 /* priority */, nullptr /* callback */,
                     AUDIO_SESSION_DEVICE, AUDIO_IO_HANDLE_NONE,
                     AudioDeviceTypeAddr{deviceEffects->getDeviceType(),
@@ -1000,16 +938,16 @@
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGE("%s(): failed to create Fx %s on port type=%d address=%s", __func__,
-                      effectDesc->mName, deviceEffects->getDeviceType(),
+                      effectDesc->mName.c_str(), deviceEffects->getDeviceType(),
                       deviceEffects->getDeviceAddress().c_str());
                 // fx goes out of scope and strong ref on AudioEffect is released
                 continue;
             }
             fx->setEnabled(true);
             ALOGV("%s(): create Fx %s added on port type=%d address=%s", __func__,
-                  effectDesc->mName, deviceEffects->getDeviceType(),
+                  effectDesc->mName.c_str(), deviceEffects->getDeviceType(),
                   deviceEffects->getDeviceAddress().c_str());
-            deviceEffects->mEffects.push_back(fx);
+            deviceEffects->mEffects.push_back(std::move(fx));
         }
     }
 }
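
The constructor stores the std::future returned by std::async in mDefaultDeviceEffectFuture; discarding it would make the launch effectively synchronous, because the future returned by std::async blocks in its destructor until the task finishes. A minimal sketch of the idiom (class and member names simplified):

    #include <future>

    class Effects {
    public:
        Effects() {
            // Keep the returned future alive: a discarded std::async future blocks in
            // its destructor, which would turn this launch into a synchronous call.
            mInitFuture = std::async(std::launch::async, &Effects::initDeviceEffects, this);
        }

    private:
        void initDeviceEffects() { /* create and enable device effects */ }

        std::future<void> mInitFuture;
    };
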
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index e17df48..a9628c2 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_AUDIOPOLICYEFFECTS_H
-#define ANDROID_AUDIOPOLICYEFFECTS_H
+#pragma once
 
 #include <stdlib.h>
 #include <stdio.h>
@@ -23,6 +22,7 @@
 #include <future>
 
 #include <android-base/thread_annotations.h>
+#include <audio_utils/mutex.h>
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
 #include <media/audiohal/EffectsFactoryHalInterface.h>
@@ -56,44 +56,43 @@
     // First it will look whether vendor specific file exists,
     // otherwise it will parse the system default file.
     explicit AudioPolicyEffects(const sp<EffectsFactoryHalInterface>& effectsFactoryHal);
-    virtual ~AudioPolicyEffects();
 
     // NOTE: methods on AudioPolicyEffects should never be called with the AudioPolicyService
-    // main mutex (mLock) held as they will indirectly call back into AudioPolicyService when
+    // main mutex (mMutex) held as they will indirectly call back into AudioPolicyService when
     // managing audio effects.
 
     // Return a list of effect descriptors for default input effects
     // associated with audioSession
     status_t queryDefaultInputEffects(audio_session_t audioSession,
                              effect_descriptor_t *descriptors,
-                             uint32_t *count);
+                             uint32_t* count) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add all input effects associated with this input
     // Effects are attached depending on the audio_source_t
     status_t addInputEffects(audio_io_handle_t input,
                              audio_source_t inputSource,
-                             audio_session_t audioSession);
+                             audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add all input effects associated to this input
     status_t releaseInputEffects(audio_io_handle_t input,
-                                 audio_session_t audioSession);
+                                 audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Return a list of effect descriptors for default output effects
     // associated with audioSession
     status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
                              effect_descriptor_t *descriptors,
-                             uint32_t *count);
+                             uint32_t* count) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add all output effects associated to this output
     // Effects are attached depending on the audio_stream_type_t
     status_t addOutputSessionEffects(audio_io_handle_t output,
                              audio_stream_type_t stream,
-                             audio_session_t audioSession);
+                             audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // release all output effects associated with this output stream and audiosession
     status_t releaseOutputSessionEffects(audio_io_handle_t output,
                              audio_stream_type_t stream,
-                             audio_session_t audioSession);
+                             audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add the effect to the list of default effects for sources of type |source|.
     status_t addSourceDefaultEffect(const effect_uuid_t *type,
@@ -101,7 +100,7 @@
                                     const effect_uuid_t *uuid,
                                     int32_t priority,
                                     audio_source_t source,
-                                    audio_unique_id_t* id);
+                                    audio_unique_id_t* id) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add the effect to the list of default effects for streams of a given usage.
     status_t addStreamDefaultEffect(const effect_uuid_t *type,
@@ -109,36 +108,39 @@
                                     const effect_uuid_t *uuid,
                                     int32_t priority,
                                     audio_usage_t usage,
-                                    audio_unique_id_t* id);
+                                    audio_unique_id_t* id) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Remove the default source effect from wherever it's attached.
-    status_t removeSourceDefaultEffect(audio_unique_id_t id);
+    status_t removeSourceDefaultEffect(audio_unique_id_t id) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Remove the default stream effect from wherever it's attached.
-    status_t removeStreamDefaultEffect(audio_unique_id_t id);
+    status_t removeStreamDefaultEffect(audio_unique_id_t id) EXCLUDES_AudioPolicyEffects_Mutex;
 
+    // Called by AudioPolicyService::onFirstRef() to load device effects
+    // on a separate worker thread.
+    // TODO(b/319515492) move this initialization after AudioPolicyService::onFirstRef().
     void setDefaultDeviceEffects();
 
 private:
-    void initDefaultDeviceEffects();
 
     // class to store the description of an effects and its parameters
     // as defined in audio_effects.conf
     class EffectDesc {
     public:
-        EffectDesc(const char *name,
+        EffectDesc(std::string_view name,
                    const effect_uuid_t& typeUuid,
                    const String16& opPackageName,
                    const effect_uuid_t& uuid,
                    uint32_t priority,
                    audio_unique_id_t id) :
-                        mName(strdup(name)),
+                        mName(name),
                         mTypeUuid(typeUuid),
                         mOpPackageName(opPackageName),
                         mUuid(uuid),
                         mPriority(priority),
                         mId(id) { }
-        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+        // Modern EffectDesc usage:
+        EffectDesc(std::string_view name, const effect_uuid_t& uuid) :
                         EffectDesc(name,
                                    *EFFECT_UUID_NULL,
                                    String16(""),
@@ -146,67 +148,36 @@
                                    0,
                                    AUDIO_UNIQUE_ID_ALLOCATE) { }
         EffectDesc(const EffectDesc& orig) :
-                        mName(strdup(orig.mName)),
+                        mName(orig.mName),
                         mTypeUuid(orig.mTypeUuid),
                         mOpPackageName(orig.mOpPackageName),
                         mUuid(orig.mUuid),
                         mPriority(orig.mPriority),
-                        mId(orig.mId) {
-                            // deep copy mParams
-                            for (size_t k = 0; k < orig.mParams.size(); k++) {
-                                effect_param_t *origParam = orig.mParams[k];
-                                // psize and vsize are rounded up to an int boundary for allocation
-                                size_t origSize = sizeof(effect_param_t) +
-                                                  ((origParam->psize + 3) & ~3) +
-                                                  ((origParam->vsize + 3) & ~3);
-                                effect_param_t *dupParam = (effect_param_t *) malloc(origSize);
-                                memcpy(dupParam, origParam, origSize);
-                                // This works because the param buffer allocation is also done by
-                                // multiples of 4 bytes originally. In theory we should memcpy only
-                                // the actual param size, that is without rounding vsize.
-                                mParams.add(dupParam);
-                            }
-                        }
-        /*virtual*/ ~EffectDesc() {
-            free(mName);
-            for (size_t k = 0; k < mParams.size(); k++) {
-                free(mParams[k]);
-            }
-        }
-        char *mName;
-        effect_uuid_t mTypeUuid;
-        String16 mOpPackageName;
-        effect_uuid_t mUuid;
-        int32_t mPriority;
-        audio_unique_id_t mId;
-        Vector <effect_param_t *> mParams;
+                        mId(orig.mId),
+                        mParams(orig.mParams) { }
+
+        const std::string mName;
+        const effect_uuid_t mTypeUuid;
+        const String16 mOpPackageName;
+        const effect_uuid_t mUuid;
+        const int32_t mPriority;
+        const audio_unique_id_t mId;
+        std::vector<std::shared_ptr<const effect_param_t>> mParams;
     };
 
-    // class to store voctor of EffectDesc
-    class EffectDescVector {
-    public:
-        EffectDescVector() {}
-        /*virtual*/ ~EffectDescVector() {
-            for (size_t j = 0; j < mEffects.size(); j++) {
-                delete mEffects[j];
-            }
-        }
-        Vector <EffectDesc *> mEffects;
-    };
+    using EffectDescVector = std::vector<std::shared_ptr<EffectDesc>>;
 
-    // class to store voctor of AudioEffects
     class EffectVector {
     public:
-        explicit EffectVector(audio_session_t session) : mSessionId(session), mRefCount(0) {}
-        /*virtual*/ ~EffectVector() {}
+        explicit EffectVector(audio_session_t session) : mSessionId(session) {}
 
         // Enable or disable all effects in effect vector
         void setProcessorEnabled(bool enabled);
 
         const audio_session_t mSessionId;
-        // AudioPolicyManager keeps mLock, no need for lock on reference count here
-        int mRefCount;
-        Vector< sp<AudioEffect> >mEffects;
+        // AudioPolicyManager keeps mMutex, no need for lock on reference count here
+        int mRefCount = 0;
+        std::vector<sp<AudioEffect>> mEffects;
     };
 
     /**
@@ -215,12 +186,11 @@
     class DeviceEffects {
     public:
         DeviceEffects(std::unique_ptr<EffectDescVector> effectDescriptors,
-                               audio_devices_t device, const std::string& address) :
+                               audio_devices_t device, std::string_view address) :
             mEffectDescriptors(std::move(effectDescriptors)),
             mDeviceType(device), mDeviceAddress(address) {}
-        /*virtual*/ ~DeviceEffects() = default;
 
-        std::vector< sp<AudioEffect> > mEffects;
+        std::vector<sp<AudioEffect>> mEffects;
         audio_devices_t getDeviceType() const { return mDeviceType; }
         std::string getDeviceAddress() const { return mDeviceAddress; }
         const std::unique_ptr<EffectDescVector> mEffectDescriptors;
@@ -231,65 +201,98 @@
 
     };
 
-    static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1];
+    // Called on an async thread because it creates AudioEffects
+    // which register with AudioFlinger and AudioPolicy.
+    // We must therefore exclude the EffectHandle_Mutex.
+    void initDefaultDeviceEffects() EXCLUDES(mDeviceEffectsMutex) EXCLUDES_EffectHandle_Mutex;
+
+    status_t loadAudioEffectConfig_ll(const sp<EffectsFactoryHalInterface>& effectsFactoryHal)
+            REQUIRES(mMutex, mDeviceEffectsMutex);
+
+    // Legacy: Begin methods below.
+    // Parse audio_effects.conf - called from constructor.
+    status_t loadAudioEffectConfigLegacy_l(const char* path) REQUIRES(mMutex);
+
+    // Legacy: Load all automatic effect configurations
+    status_t loadInputEffectConfigurations_l(cnode* root,
+            const EffectDescVector& effects) REQUIRES(mMutex);
+    status_t loadStreamEffectConfigurations_l(cnode* root,
+            const EffectDescVector& effects) REQUIRES(mMutex);
+
+    // Legacy: static methods below.
+
     static audio_source_t inputSourceNameToEnum(const char *name);
 
-    static const char *kStreamNames[AUDIO_STREAM_PUBLIC_CNT+1]; //+1 required as streams start from -1
-    audio_stream_type_t streamNameToEnum(const char *name);
-
-    // Parse audio_effects.conf
-    status_t loadAudioEffectConfigLegacy(const char *path);
-    status_t loadAudioEffectConfig(const sp<EffectsFactoryHalInterface>& effectsFactoryHal);
+    static audio_stream_type_t streamNameToEnum(const char* name);
 
     // Load all effects descriptors in configuration file
-    status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects);
-    EffectDesc *loadEffect(cnode *root);
-
-    // Load all automatic effect configurations
-    status_t loadInputEffectConfigurations(cnode *root, const Vector <EffectDesc *>& effects);
-    status_t loadStreamEffectConfigurations(cnode *root, const Vector <EffectDesc *>& effects);
-    EffectDescVector *loadEffectConfig(cnode *root, const Vector <EffectDesc *>& effects);
+    static EffectDescVector loadEffects(cnode* root);
+    static std::shared_ptr<AudioPolicyEffects::EffectDesc> loadEffect(cnode* root);
+    static std::shared_ptr<EffectDescVector> loadEffectConfig(cnode* root,
+            const EffectDescVector& effects);
 
     // Load all automatic effect parameters
-    void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params);
-    effect_param_t *loadEffectParameter(cnode *root);
-    size_t readParamValue(cnode *node,
+    static void loadEffectParameters(
+            cnode* root, std::vector<std::shared_ptr<const effect_param_t>>& params);
+
+    // loadEffectParameter returns a shared_ptr instead of a unique_ptr as there may
+    // be multiple references to the same effect parameter.
+    static std::shared_ptr<const effect_param_t> loadEffectParameter(cnode* root);
+    static size_t readParamValue(cnode* node,
                           char **param,
                           size_t *curSize,
                           size_t *totSize);
-    size_t growParamSize(char **param,
+    static size_t growParamSize(char** param,
                          size_t size,
                          size_t *curSize,
                          size_t *totSize);
 
+    // Legacy: End methods above.
+
+    // Note: The association of Effects to audio source, session, or stream
+    // is done through std::map instead of std::unordered_map.  This gives
+    // better reproducibility of issues, since map is ordered and more predictable
+    // in enumeration.
+
     // protects access to mInputSources, mInputSessions, mOutputStreams, mOutputSessions
-    // never hold AudioPolicyService::mLock when calling AudioPolicyEffects methods as
+    // never hold AudioPolicyService::mMutex when calling AudioPolicyEffects methods as
     // those can call back into AudioPolicyService methods and try to acquire the mutex
-    Mutex mLock;
+    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioPolicyEffects_Mutex};
     // Automatic input effects are configured per audio_source_t
-    KeyedVector< audio_source_t, EffectDescVector* > mInputSources;
-    // Automatic input effects are unique for audio_io_handle_t
-    KeyedVector< audio_session_t, EffectVector* > mInputSessions;
+    std::map<audio_source_t, std::shared_ptr<EffectDescVector>> mInputSources
+            GUARDED_BY(mMutex);
+    // Automatic input effects are unique for an audio_session_t.
+    std::map<audio_session_t, std::shared_ptr<EffectVector>> mInputSessions
+            GUARDED_BY(mMutex);
 
     // Automatic output effects are organized per audio_stream_type_t
-    KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
-    // Automatic output effects are unique for audiosession ID
-    KeyedVector< audio_session_t, EffectVector* > mOutputSessions;
+    std::map<audio_stream_type_t, std::shared_ptr<EffectDescVector>> mOutputStreams
+            GUARDED_BY(mMutex);
+    // Automatic output effects are unique for an audio_session_t.
+    std::map<audio_session_t, std::shared_ptr<EffectVector>> mOutputSessions
+            GUARDED_BY(mMutex);
 
     /**
      * @brief mDeviceEffects map of device effects indexed by the device address
      */
-    std::map<std::string, std::unique_ptr<DeviceEffects>> mDeviceEffects GUARDED_BY(mLock);
+
+    // mDeviceEffects is never accessed through AudioPolicyEffects methods.
+    // We keep a separate mutex here to catch future methods attempting to access this variable.
+    std::mutex mDeviceEffectsMutex;
+    std::map<std::string, std::unique_ptr<DeviceEffects>> mDeviceEffects
+            GUARDED_BY(mDeviceEffectsMutex);
 
     /**
      * Device Effect initialization must be asynchronous: the audio_policy service parses and init
      * effect on first reference. AudioFlinger will handle effect creation and register these
      * effect on audio_policy service.
-     * We must store the reference of the furture garantee real asynchronous operation.
+     *
+     * The future is associated with the std::async launched thread - no need to lock as
+     * it is only set once on init.  Due to the async nature, it is conceivable that
+     * some device effects are not available immediately after AudioPolicyService::onFirstRef()
+     * while the effects are being created.
      */
     std::future<void> mDefaultDeviceEffectFuture;
 };
 
 } // namespace android
-
-#endif // ANDROID_AUDIOPOLICYEFFECTS_H
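
The header now leans on clang thread-safety annotations: members are GUARDED_BY the mutex that protects them, and methods declare REQUIRES/EXCLUDES so misuse is caught at compile time. A minimal sketch using the same macros from <android-base/thread_annotations.h> over standard types (Store is illustrative; the analysis needs a capability-annotated mutex type, which std::mutex is when libc++'s thread-safety annotations are enabled, and which audio_utils::mutex provides in the real code):

    #include <mutex>
    #include <vector>
    #include <android-base/thread_annotations.h>

    class Store {
    public:
        void add(int v) EXCLUDES(mMutex) {            // caller must not already hold mMutex
            std::lock_guard _l(mMutex);
            add_l(v);
        }

    private:
        void add_l(int v) REQUIRES(mMutex) {          // caller must hold mMutex
            mData.push_back(v);
        }

        mutable std::mutex mMutex;
        std::vector<int> mData GUARDED_BY(mMutex);    // only touched with mMutex held
    };
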
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 509b673..2a4c069 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -114,7 +114,7 @@
 void AudioPolicyService::doOnNewAudioModulesAvailable()
 {
     if (mAudioPolicyManager == NULL) return;
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     mAudioPolicyManager->onNewAudioModulesAvailable();
 }
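
The rest of this file replaces Mutex::Autolock on mLock with audio_utils::lock_guard on mMutex; both are RAII guards, so only the guard and mutex types change at each call site. A generic sketch of the pattern with standard types (gMutex and gState are illustrative):

    #include <mutex>

    std::mutex gMutex;
    int gState = 0;

    void update(int v) {
        std::lock_guard _l(gMutex);   // locks here, unlocks on every path out of the scope
        gState = v;
    }
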
@@ -140,7 +140,7 @@
     }
 
     ALOGV("setDeviceConnectionState()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status = mAudioPolicyManager->setDeviceConnectionState(
             state, port, encodedFormat);
@@ -162,7 +162,7 @@
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
         return Status::ok();
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
@@ -190,7 +190,7 @@
     }
 
     ALOGV("handleDeviceConfigChange()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status =  mAudioPolicyManager->handleDeviceConfigChange(
             device, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
@@ -221,7 +221,7 @@
     // acquire lock before calling setMode() so that setMode() + setPhoneState() are an atomic
     // operation from policy manager standpoint (no other operation (e.g track start or stop)
     // can be interleaved).
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     // TODO: check if it is more appropriate to do it in platform specific policy manager
 
     // Audio HAL mode conversion for call redirect modes
@@ -242,7 +242,7 @@
 }
 
 Status AudioPolicyService::getPhoneState(AudioMode* _aidl_return) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_mode_t_AudioMode(mPhoneState));
     return Status::ok();
 }
@@ -270,7 +270,7 @@
         return binderStatusFromStatusT(BAD_VALUE);
     }
     ALOGV("setForceUse()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     mAudioPolicyManager->setForceUse(usage, config);
     onCheckSpatializer_l();
@@ -312,7 +312,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
     ALOGV("getOutput()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(mAudioPolicyManager->getOutput(stream)));
@@ -352,7 +352,7 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
 
     ALOGV("%s()", __func__);
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     if (!mPackageManager.allowPlaybackCapture(VALUE_OR_RETURN_BINDER_STATUS(
         aidl2legacy_int32_t_uid_t(attributionSource.uid)))) {
@@ -458,7 +458,7 @@
                                                      sp<AudioPolicyEffects>& effects,
                                                      const char *context)
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
     if (index < 0) {
         ALOGE("%s AudioTrack client not found for portId %d", context, portId);
@@ -489,7 +489,7 @@
             ALOGW("Failed to add effects on session %d", client->session);
         }
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status = mAudioPolicyManager->startOutput(portId);
     if (status == NO_ERROR) {
@@ -531,7 +531,7 @@
             ALOGW("Failed to release effects on session %d", client->session);
         }
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status = mAudioPolicyManager->stopOutput(portId);
     if (status == NO_ERROR) {
@@ -567,7 +567,7 @@
         audioPolicyEffects->releaseOutputSessionEffects(
             client->io, client->stream, client->session);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (client != nullptr && client->active) {
         onUpdateActiveSpatializerTracks_l();
     }
@@ -691,7 +691,7 @@
         status_t status;
         AudioPolicyInterface::input_type_t inputType;
 
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         {
             AutoCallerClear acc;
             // the audio_in_acoustics_t parameter is ignored by get_input()
@@ -794,7 +794,7 @@
     }
     sp<AudioRecordClient> client;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
 
         ssize_t index = mAudioRecordClients.indexOfKey(portId);
         if (index < 0) {
@@ -817,7 +817,7 @@
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     ALOGW_IF(client->silenced, "startInput on silenced input for port %d, uid %d. Unsilencing.",
             portIdAidl,
@@ -937,7 +937,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     ssize_t index = mAudioRecordClients.indexOfKey(portId);
     if (index < 0) {
@@ -967,7 +967,7 @@
     sp<AudioPolicyEffects>audioPolicyEffects;
     sp<AudioRecordClient> client;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         audioPolicyEffects = mAudioPolicyEffects;
         ssize_t index = mAudioRecordClients.indexOfKey(portId);
         if (index < 0) {
@@ -995,7 +995,7 @@
         }
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         AutoCallerClear acc;
         mAudioPolicyManager->releaseInput(portId);
     }
@@ -1019,7 +1019,7 @@
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
     return binderStatusFromStatusT(NO_ERROR);
@@ -1043,7 +1043,7 @@
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->setStreamVolumeIndex(stream,
                                                                              index,
@@ -1065,7 +1065,7 @@
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getStreamVolumeIndex(stream, &index, device)));
@@ -1090,7 +1090,7 @@
     if (!settingsAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(
             mAudioPolicyManager->setVolumeIndexForAttributes(attributes, index, device));
@@ -1110,7 +1110,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getVolumeIndexForAttributes(attributes, index, device)));
@@ -1129,7 +1129,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getMinVolumeIndexForAttributes(attributes, index)));
@@ -1148,7 +1148,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getMaxVolumeIndexForAttributes(attributes, index)));
@@ -1190,7 +1190,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForAttributes(aa, &devices, forVolume)));
@@ -1210,7 +1210,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(mAudioPolicyManager->getOutputForEffect(&desc)));
@@ -1235,7 +1235,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(
             mAudioPolicyManager->registerEffect(&desc, io, strategy, session, id));
@@ -1247,7 +1247,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->unregisterEffect(id));
 }
@@ -1258,7 +1258,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->setEffectEnabled(id, enabled));
 }
@@ -1277,7 +1277,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->moveEffectsToIo(ids, io));
 }
@@ -1295,7 +1295,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isStreamActive(stream, inPastMs);
     return Status::ok();
@@ -1315,7 +1315,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
     return Status::ok();
@@ -1327,7 +1327,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isSourceActive(source);
     return Status::ok();
@@ -1339,7 +1339,7 @@
         return NO_INIT;
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects == 0) {
@@ -1463,7 +1463,7 @@
             convertRange(systemUsagesAidl.begin(), systemUsagesAidl.begin() + size,
                          std::back_inserter(systemUsages), aidl2legacy_AudioUsage_audio_usage_t)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1483,7 +1483,7 @@
     audio_flags_mask_t capturePolicy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_flags_mask_t_mask(capturePolicyAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         ALOGV("%s() mAudioPolicyManager == NULL", __func__);
         return binderStatusFromStatusT(NO_INIT);
@@ -1500,7 +1500,7 @@
         ALOGV("mAudioPolicyManager == NULL");
         return binderStatusFromStatusT(AUDIO_OFFLOAD_NOT_SUPPORTED);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_offload_mode_t_AudioOffloadMode(
             mAudioPolicyManager->getOffloadSupport(info)));
@@ -1525,7 +1525,7 @@
 
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
     return Status::ok();
 }
@@ -1561,7 +1561,7 @@
     std::unique_ptr<audio_port_v7[]> ports(new audio_port_v7[num_ports]);
     unsigned int generation;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1589,7 +1589,7 @@
 
 Status AudioPolicyService::listDeclaredDevicePorts(media::AudioPortRole role,
                                                     std::vector<media::AudioPortFw>* _aidl_return) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1601,7 +1601,7 @@
 Status AudioPolicyService::getAudioPort(int portId,
                                         media::AudioPortFw* _aidl_return) {
     audio_port_v7 port{ .id = portId };
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1628,7 +1628,7 @@
             aidl2legacy_int32_t_audio_port_handle_t(handleAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(AudioValidator::validateAudioPatch(patch)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1647,7 +1647,7 @@
 {
     audio_patch_handle_t handle = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_patch_handle_t(handleAidl));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1672,7 +1672,7 @@
     std::unique_ptr<audio_patch[]> patches(new audio_patch[num_patches]);
     unsigned int generation;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1710,7 +1710,7 @@
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(AudioValidator::validateAudioPortConfig(config)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1728,7 +1728,7 @@
     audio_devices_t device;
 
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         if (mAudioPolicyManager == NULL) {
             return binderStatusFromStatusT(NO_INIT);
         }
@@ -1750,7 +1750,7 @@
 {
     audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(sessionAidl));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1769,7 +1769,7 @@
             convertRange(mixesAidl.begin(), mixesAidl.begin() + size, std::back_inserter(mixes),
                          aidl2legacy_AudioMix)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // loopback|render only need a MediaProjection (checked in caller AudioService.java)
     bool needModifyAudioRouting = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
@@ -1810,9 +1810,26 @@
     }
 }
 
+Status
+AudioPolicyService::getRegisteredPolicyMixes(std::vector<::android::media::AudioMix>* mixesAidl) {
+    if (mAudioPolicyManager == nullptr) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+
+    std::vector<AudioMix> mixes;
+    status_t status = mAudioPolicyManager->getRegisteredPolicyMixes(mixes);
+
+    for (const auto& mix : mixes) {
+        media::AudioMix aidlMix = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_AudioMix(mix));
+        mixesAidl->push_back(aidlMix);
+    }
+
+    return binderStatusFromStatusT(status);
+}
+
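
The new getRegisteredPolicyMixes() above follows the usual shape of these bridge methods: fetch the legacy objects from the policy manager, convert each one to its AIDL counterpart, bail out on the first conversion failure, and map the legacy status into a binder Status at the end. A generic sketch of that convert-or-fail loop using std::optional; LegacyItem, AidlItem, toAidl and convertAll are hypothetical names used only for illustration:

// Sketch: convert a vector element by element, stopping at the first failure.
#include <optional>
#include <string>
#include <vector>

struct LegacyItem { int id; };
struct AidlItem { std::string name; };

std::optional<AidlItem> toAidl(const LegacyItem& item) {
    if (item.id < 0) return std::nullopt;  // conversion failure
    return AidlItem{std::to_string(item.id)};
}

bool convertAll(const std::vector<LegacyItem>& in, std::vector<AidlItem>* out) {
    for (const auto& item : in) {
        std::optional<AidlItem> converted = toAidl(item);
        if (!converted) return false;  // mirrors bailing out on a failed conversion
        out->push_back(*converted);
    }
    return true;
}
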
 Status AudioPolicyService::updatePolicyMixes(
         const ::std::vector<::android::media::AudioMixUpdate>& updates) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     for (const auto& update : updates) {
         AudioMix mix = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_AudioMix(update.audioMix));
         std::vector<AudioMixMatchCriterion> newCriteria =
@@ -1834,7 +1851,7 @@
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
                                                         aidl2legacy_AudioDeviceTypeAddress));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1848,7 +1865,7 @@
 Status AudioPolicyService::removeUidDeviceAffinities(int32_t uidAidl) {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1867,7 +1884,7 @@
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
                                                         aidl2legacy_AudioDeviceTypeAddress));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1881,7 +1898,7 @@
 Status AudioPolicyService::removeUserIdDeviceAffinities(int32_t userIdAidl) {
     int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1905,7 +1922,7 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             AudioValidator::validateAudioAttributes(attributes, "68953950")));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1926,7 +1943,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1942,7 +1959,7 @@
     if (!settingsAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->setMasterMono(mono));
 }
@@ -1952,7 +1969,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->getMasterMono(_aidl_return));
 }
@@ -1970,7 +1987,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->getStreamVolumeDB(stream, index, device);
     return Status::ok();
@@ -1991,7 +2008,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getSurroundFormats(&numSurroundFormats, surroundFormats.get(),
@@ -2022,7 +2039,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getReportedSurroundFormats(
@@ -2044,7 +2061,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
@@ -2064,7 +2081,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(
             mAudioPolicyManager->setSurroundFormatEnabled(audioFormat, enabled));
@@ -2087,7 +2104,7 @@
     std::vector<uid_t> uids;
     RETURN_IF_BINDER_ERROR(convertInt32VectorToUidVectorWithLimit(uidsAidl, uids));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setAssistantUids(uids);
     return Status::ok();
 }
@@ -2097,7 +2114,7 @@
     std::vector<uid_t> activeUids;
     RETURN_IF_BINDER_ERROR(convertInt32VectorToUidVectorWithLimit(activeUidsAidl, activeUids));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setActiveAssistantUids(activeUids);
     return Status::ok();
 }
@@ -2107,7 +2124,7 @@
     std::vector<uid_t> uids;
     RETURN_IF_BINDER_ERROR(convertInt32VectorToUidVectorWithLimit(uidsAidl, uids));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setA11yUids(uids);
     return Status::ok();
 }
@@ -2115,7 +2132,7 @@
 Status AudioPolicyService::setCurrentImeUid(int32_t uidAidl)
 {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setCurrentImeUid(uid);
     return Status::ok();
 }
@@ -2125,7 +2142,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isHapticPlaybackSupported();
     return Status::ok();
@@ -2136,7 +2153,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isUltrasoundSupported();
     return Status::ok();
@@ -2147,7 +2164,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isHotwordStreamSupported(lookbackAudio);
     return Status::ok();
@@ -2160,7 +2177,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->listAudioProductStrategies(strategies)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2180,7 +2197,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getProductStrategyFromAudioAttributes(
                     aa, productStrategy, fallbackOnDefault)));
@@ -2195,7 +2212,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->listAudioVolumeGroups(groups)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2214,7 +2231,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(
                     mAudioPolicyManager->getVolumeGroupFromAudioAttributes(
@@ -2225,7 +2242,7 @@
 
 Status AudioPolicyService::setRttEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setRttEnabled(enabled);
     return Status::ok();
 }
@@ -2235,7 +2252,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isCallScreenModeSupported();
     return Status::ok();
@@ -2256,7 +2273,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     status_t status = mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -2279,7 +2296,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     status_t status = mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role, devices);
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -2296,7 +2313,7 @@
    if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     status_t status = mAudioPolicyManager->clearDevicesRoleForStrategy(strategy, role);
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -2317,7 +2334,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2347,7 +2364,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->setDevicesRoleForCapturePreset(audioSource, role, devices));
 }
@@ -2367,7 +2384,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->addDevicesRoleForCapturePreset(audioSource, role, devices));
 }
@@ -2387,7 +2404,7 @@
    if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
 }
@@ -2402,7 +2419,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->clearDevicesRoleForCapturePreset(audioSource, role));
 }
@@ -2420,7 +2437,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2467,7 +2484,7 @@
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
                                                         aidl2legacy_AudioDeviceTypeAddress));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = mAudioPolicyManager->canBeSpatialized(&attr, &config, devices);
     return Status::ok();
 }
@@ -2486,7 +2503,7 @@
             aidl2legacy_AudioAttributes_audio_attributes_t(attrAidl));
     audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = static_cast<media::AudioDirectMode>(
             VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_direct_mode_t_int32_t_mask(
                     mAudioPolicyManager->getDirectPlaybackSupport(&attr, &config))));
@@ -2503,7 +2520,7 @@
             aidl2legacy_AudioAttributes_audio_attributes_t(attrAidl));
     AudioProfileVector audioProfiles;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDirectProfilesForAttributes(&attr, audioProfiles)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2523,7 +2540,7 @@
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
     std::vector<audio_mixer_attributes_t> mixerAttrs;
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->getSupportedMixerAttributes(
                     portId, mixerAttrs)));
@@ -2551,7 +2568,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->setPreferredMixerAttributes(&attr, portId, uid, &mixerAttr));
 }
@@ -2569,7 +2586,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     audio_mixer_attributes_t mixerAttr = AUDIO_MIXER_ATTRIBUTES_INITIALIZER;
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->getPreferredMixerAttributes(
@@ -2593,7 +2610,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->clearPreferredMixerAttributes(&attr, portId, uid));
 }
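
The change applied throughout the file above is mechanical: every scoped Mutex::Autolock _l(mLock) becomes audio_utils::lock_guard _l(mMutex), preserving the RAII scoping while switching to the audio_utils mutex that carries thread-safety capability annotations and an acquisition-order tag (see the MutexOrder values in the header changes further down). As an illustration only, here is the same RAII idiom expressed with the standard library; Counter and its members are hypothetical and not part of this change:

// Illustrative sketch: the scoped-locking idiom that both Mutex::Autolock
// and audio_utils::lock_guard implement, shown with std::lock_guard.
#include <mutex>

class Counter {
public:
    void increment() {
        std::lock_guard<std::mutex> _l(mMutex);  // held until the end of the scope
        ++mValue;
    }
    int value() const {
        std::lock_guard<std::mutex> _l(mMutex);  // released automatically on return
        return mValue;
    }
private:
    mutable std::mutex mMutex;
    int mValue = 0;
};
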
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 5d3788d..bc6498a 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -166,7 +166,8 @@
 BINDER_METHOD_ENTRY(setPreferredMixerAttributes) \
 BINDER_METHOD_ENTRY(getPreferredMixerAttributes) \
 BINDER_METHOD_ENTRY(clearPreferredMixerAttributes) \
-
+BINDER_METHOD_ENTRY(getRegisteredPolicyMixes) \
+                                                     \
 // singleton for Binder Method Statistics for IAudioPolicyService
 static auto& getIAudioPolicyServiceStatistics() {
     using Code = int;
@@ -265,7 +266,7 @@
             .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
             .record(); });
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
 
         // start audio commands thread
         mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
@@ -280,11 +281,11 @@
 
     // load audio processing modules
     const sp<EffectsFactoryHalInterface> effectsFactoryHal = EffectsFactoryHalInterface::create();
-    sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects(effectsFactoryHal);
-    sp<UidPolicy> uidPolicy = new UidPolicy(this);
-    sp<SensorPrivacyPolicy> sensorPrivacyPolicy = new SensorPrivacyPolicy(this);
+    auto audioPolicyEffects = sp<AudioPolicyEffects>::make(effectsFactoryHal);
+    auto uidPolicy = sp<UidPolicy>::make(this);
+    auto sensorPrivacyPolicy = sp<SensorPrivacyPolicy>::make(this);
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mAudioPolicyEffects = audioPolicyEffects;
         mUidPolicy = uidPolicy;
         mSensorPrivacyPolicy = sensorPrivacyPolicy;
@@ -294,16 +295,16 @@
 
     // Create spatializer if supported
     if (mAudioPolicyManager != nullptr) {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
         AudioDeviceTypeAddrVector devices;
         bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
         if (hasSpatializer) {
             // Unlock as Spatializer::create() will use the callback and acquire the
             // AudioPolicyService_Mutex.
-            mLock.unlock();
+            mMutex.unlock();
             mSpatializer = Spatializer::create(this, effectsFactoryHal);
-            mLock.lock();
+            mMutex.lock();
         }
         if (mSpatializer == nullptr) {
             // No spatializer created, signal the reason: NO_INIT a failure, OK means intended.
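
The spatializer hunk above keeps the pre-existing unlock/relock around Spatializer::create(): per the comment, create() uses the service callback and takes the same mutex, so holding it across the call would self-deadlock. A minimal sketch of that re-entrancy pattern with std::unique_lock; gStateMutex, createHelper and createWithoutSelfDeadlock are hypothetical names used only for illustration:

// Sketch: drop the lock across a call that may lock the same mutex again.
#include <mutex>

std::mutex gStateMutex;

int createHelper() {
    std::lock_guard<std::mutex> _l(gStateMutex);  // would deadlock if the caller still held it
    return 42;
}

int createWithoutSelfDeadlock() {
    std::unique_lock<std::mutex> ul(gStateMutex);
    // ... inspect state guarded by gStateMutex ...
    ul.unlock();                  // release before the re-entrant call
    int result = createHelper();  // safe: gStateMutex is not held here
    ul.lock();                    // re-acquire before touching guarded state again
    return result;
}
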
@@ -356,7 +357,7 @@
         ALOGW("%s got NULL client", __FUNCTION__);
         return Status::ok();
     }
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
     pid_t pid = IPCThreadState::self()->getCallingPid();
@@ -379,7 +380,7 @@
 
 Status AudioPolicyService::setAudioPortCallbacksEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
     pid_t pid = IPCThreadState::self()->getCallingPid();
@@ -394,7 +395,7 @@
 
 Status AudioPolicyService::setAudioVolumeGroupCallbacksEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
     pid_t pid = IPCThreadState::self()->getCallingPid();
@@ -412,7 +413,7 @@
 {
     bool hasSameUid = false;
     {
-        Mutex::Autolock _l(mNotificationClientsLock);
+        audio_utils::lock_guard _l(mNotificationClientsMutex);
         int64_t token = ((int64_t)uid<<32) | pid;
         mNotificationClients.removeItem(token);
         for (size_t i = 0; i < mNotificationClients.size(); i++) {
@@ -423,7 +424,7 @@
         }
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         if (mAudioPolicyManager && !hasSameUid) {
             // called from binder death notification: no need to clear caller identity
             mAudioPolicyManager->releaseResourcesForUid(uid);
@@ -438,7 +439,7 @@
 
 void AudioPolicyService::doOnAudioPortListUpdate()
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onAudioPortListUpdate();
     }
@@ -451,7 +452,7 @@
 
 void AudioPolicyService::doOnAudioPatchListUpdate()
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onAudioPatchListUpdate();
     }
@@ -464,7 +465,7 @@
 
 void AudioPolicyService::doOnAudioVolumeGroupChanged(volume_group_t group, int flags)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onAudioVolumeGroupChanged(group, flags);
     }
@@ -479,7 +480,7 @@
 
 void AudioPolicyService::doOnDynamicPolicyMixStateUpdate(const String8& regId, int32_t state)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onDynamicPolicyMixStateUpdate(regId, state);
     }
@@ -509,7 +510,7 @@
                                                   audio_patch_handle_t patchHandle,
                                                   audio_source_t source)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onRecordingConfigurationUpdate(event, clientInfo,
                 clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
@@ -523,7 +524,7 @@
 
 void AudioPolicyService::doOnRoutingUpdated()
 {
-  Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onRoutingUpdated();
     }
@@ -536,7 +537,7 @@
 
 void AudioPolicyService::doOnVolumeRangeInitRequest()
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onVolumeRangeInitRequest();
     }
@@ -544,7 +545,7 @@
 
 void AudioPolicyService::onCheckSpatializer()
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     onCheckSpatializer_l();
 }
 
@@ -568,7 +569,7 @@
             const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
             audio_config_base_t config = mSpatializer->getAudioInConfig();
 
-            Mutex::Autolock _l(mLock);
+            audio_utils::lock_guard _l(mMutex);
             status_t status =
                     mAudioPolicyManager->getSpatializerOutput(&config, &attr, &newOutput);
             ALOGV("%s currentOutput %d newOutput %d channel_mask %#x",
@@ -577,13 +578,13 @@
                 return;
             }
             size_t numActiveTracks = countActiveClientsOnOutput_l(newOutput);
-            mLock.unlock();
+            mMutex.unlock();
             // It is OK to call detachOutput() even if none is already attached.
             mSpatializer->detachOutput();
             if (status == NO_ERROR && newOutput != AUDIO_IO_HANDLE_NONE) {
                 status = mSpatializer->attachOutput(newOutput, numActiveTracks);
             }
-            mLock.lock();
+            mMutex.lock();
             if (status != NO_ERROR) {
                 mAudioPolicyManager->releaseSpatializerOutput(newOutput);
             }
@@ -592,7 +593,7 @@
             audio_io_handle_t output = mSpatializer->detachOutput();
 
             if (output != AUDIO_IO_HANDLE_NONE) {
-                Mutex::Autolock _l(mLock);
+                audio_utils::lock_guard _l(mMutex);
                 mAudioPolicyManager->releaseSpatializerOutput(output);
             }
         }
@@ -627,7 +628,7 @@
     audio_io_handle_t output = mSpatializer->getOutput();
     size_t activeClients;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         activeClients = countActiveClientsOnOutput_l(output);
     }
     mSpatializer->updateActiveTracks(activeClients);
@@ -783,12 +784,8 @@
             IPCThreadState::self()->getCallingPid());
 }
 
-static bool dumpTryLock(Mutex& mutex) ACQUIRE(mutex) NO_THREAD_SAFETY_ANALYSIS
-{
-    return mutex.timedLock(kDumpLockTimeoutNs) == NO_ERROR;
-}
-
-static void dumpReleaseLock(Mutex& mutex, bool locked) RELEASE(mutex) NO_THREAD_SAFETY_ANALYSIS
+static void dumpReleaseLock(audio_utils::mutex& mutex, bool locked)
+        RELEASE(mutex) NO_THREAD_SAFETY_ANALYSIS
 {
     if (locked) mutex.unlock();
 }
@@ -825,7 +822,7 @@
 
 void AudioPolicyService::updateUidStates()
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     updateUidStates_l();
 }
 
@@ -1027,7 +1024,7 @@
         bool isTopOrLatestAssistant = latestActiveAssistant == nullptr ? false :
             current->attributionSource.uid == latestActiveAssistant->attributionSource.uid;
 
-        auto canCaptureIfInCallOrCommunication = [&](const auto &recordClient) REQUIRES(mLock) {
+        auto canCaptureIfInCallOrCommunication = [&](const auto &recordClient) REQUIRES(mMutex) {
             uid_t recordUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(
                 recordClient->attributionSource.uid));
             bool canCaptureCall = recordClient->canCaptureOutput;
@@ -1205,11 +1202,12 @@
 }
 
 status_t AudioPolicyService::dump(int fd, const Vector<String16>& args __unused)
+NO_THREAD_SAFETY_ANALYSIS  // mMutex.try_lock() with timeout below is not modeled by the analysis.
 {
     if (!dumpAllowed()) {
         dumpPermissionDenial(fd);
     } else {
-        const bool locked = dumpTryLock(mLock);
+        const bool locked = mMutex.try_lock(kDumpLockTimeoutNs);
         if (!locked) {
             String8 result(kDeadlockedString);
             write(fd, result.c_str(), result.size());
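
With dumpTryLock() removed, dump() calls mMutex.try_lock(kDumpLockTimeoutNs) directly, but the intent is unchanged: never let a dump block forever on a possibly deadlocked service, emit the deadlock marker if the lock cannot be taken, and release the lock only if it was actually acquired. A sketch of the same idiom with std::timed_mutex; dumpState, gServiceMutex and the one-second timeout are hypothetical:

// Sketch: dump with a timed try-lock so a wedged service cannot hang the dump.
#include <chrono>
#include <cstdio>
#include <mutex>

std::timed_mutex gServiceMutex;

void dumpState(FILE* out) {
    const bool locked = gServiceMutex.try_lock_for(std::chrono::seconds(1));
    if (!locked) {
        fprintf(out, "service may be deadlocked; dump may be unreliable\n");
    }
    fprintf(out, "...internal state...\n");
    if (locked) {
        gServiceMutex.unlock();  // release only if the try-lock succeeded
    }
}
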
@@ -1238,7 +1236,7 @@
 
         mPackageManager.dump(fd);
 
-        dumpReleaseLock(mLock, locked);
+        dumpReleaseLock(mMutex, locked);
 
         if (mSpatializer != nullptr) {
             std::string dumpString = mSpatializer->toString(1 /* level */);
@@ -1351,7 +1349,8 @@
         case TRANSACTION_getDevicesForRoleAndCapturePreset:
         case TRANSACTION_getSpatializer:
         case TRANSACTION_setPreferredMixerAttributes:
-        case TRANSACTION_clearPreferredMixerAttributes: {
+        case TRANSACTION_clearPreferredMixerAttributes:
+        case TRANSACTION_getRegisteredPolicyMixes: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1483,7 +1482,7 @@
 
     sp<UidPolicy> uidPolicy;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         uidPolicy = mUidPolicy;
     }
     if (uidPolicy) {
@@ -1512,7 +1511,7 @@
 
     sp<UidPolicy> uidPolicy;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         uidPolicy = mUidPolicy;
     }
     if (uidPolicy) {
@@ -1541,7 +1540,7 @@
 
     sp<UidPolicy> uidPolicy;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         uidPolicy = mUidPolicy;
     }
     if (uidPolicy) {
@@ -1579,7 +1578,7 @@
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("audioserver"));
     if (!res) {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mObserverRegistered = true;
     } else {
         ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
@@ -1591,12 +1590,12 @@
 void AudioPolicyService::UidPolicy::unregisterSelf() {
     mAm.unlinkToDeath(this);
     mAm.unregisterUidObserver(this);
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mObserverRegistered = false;
 }
 
 void AudioPolicyService::UidPolicy::binderDied(__unused const wp<IBinder> &who) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mCachedUids.clear();
     mObserverRegistered = false;
 }
@@ -1604,7 +1603,7 @@
 void AudioPolicyService::UidPolicy::checkRegistered() {
     bool needToReregister = false;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         needToReregister = !mObserverRegistered;
     }
     if (needToReregister) {
@@ -1617,7 +1616,7 @@
     if (isServiceUid(uid)) return true;
     checkRegistered();
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         auto overrideIter = mOverrideUids.find(uid);
         if (overrideIter != mOverrideUids.end()) {
             return overrideIter->second.first;
@@ -1632,7 +1631,7 @@
     ActivityManager am;
     bool active = am.isUidActive(uid, String16("audioserver"));
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mCachedUids.insert(std::pair<uid_t,
                            std::pair<bool, int>>(uid, std::pair<bool, int>(active,
                                                       ActivityManager::PROCESS_STATE_UNKNOWN)));
@@ -1646,7 +1645,7 @@
     }
     checkRegistered();
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         auto overrideIter = mOverrideUids.find(uid);
         if (overrideIter != mOverrideUids.end()) {
             if (overrideIter->second.first) {
@@ -1681,7 +1680,7 @@
         state = am.getUidProcessState(uid, String16("audioserver"));
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mCachedUids.insert(std::pair<uid_t,
                            std::pair<bool, int>>(uid, std::pair<bool, int>(active, state)));
     }
@@ -1736,7 +1735,7 @@
     bool wasActive = isUidActive(uid);
     int previousState = getUidState(uid);
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         updateUidLocked(uids, uid, active, state, insert);
     }
     if (wasActive != isUidActive(uid) || state != previousState) {
@@ -1771,7 +1770,7 @@
 }
 
 bool AudioPolicyService::UidPolicy::isA11yOnTop() {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     for (const auto &uid : mCachedUids) {
         if (!isA11yUid(uid.first)) {
             continue;
@@ -1902,7 +1901,7 @@
 {
     nsecs_t waitTime = -1;
 
-    mLock.lock();
+    audio_utils::unique_lock ul(mMutex);
     while (!exitPending())
     {
         sp<AudioPolicyService> svc;
@@ -1923,27 +1922,27 @@
                     VolumeData *data = (VolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set volume stream %d, \
                             volume %f, output %d", data->mStream, data->mVolume, data->mIO);
-                    mLock.unlock();
+                    ul.unlock();
                     command->mStatus = AudioSystem::setStreamVolume(data->mStream,
                                                                     data->mVolume,
                                                                     data->mIO);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case SET_PARAMETERS: {
                     ParametersData *data = (ParametersData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
                             data->mKeyValuePairs.c_str(), data->mIO);
-                    mLock.unlock();
+                    ul.unlock();
                     command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case SET_VOICE_VOLUME: {
                     VoiceVolumeData *data = (VoiceVolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set voice volume volume %f",
                             data->mVolume);
-                    mLock.unlock();
+                    ul.unlock();
                     command->mStatus = AudioSystem::setVoiceVolume(data->mVolume);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case STOP_OUTPUT: {
                     StopOutputData *data = (StopOutputData *)command->mParam.get();
@@ -1953,9 +1952,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doStopOutput(data->mPortId);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case RELEASE_OUTPUT: {
                     ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get();
@@ -1965,9 +1964,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doReleaseOutput(data->mPortId);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case CREATE_AUDIO_PATCH: {
                     CreateAudioPatchData *data = (CreateAudioPatchData *)command->mParam.get();
@@ -1976,9 +1975,9 @@
                     if (af == 0) {
                         command->mStatus = PERMISSION_DENIED;
                     } else {
-                        mLock.unlock();
+                        ul.unlock();
                         command->mStatus = af->createAudioPatch(&data->mPatch, &data->mHandle);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case RELEASE_AUDIO_PATCH: {
@@ -1988,9 +1987,9 @@
                     if (af == 0) {
                         command->mStatus = PERMISSION_DENIED;
                     } else {
-                        mLock.unlock();
+                        ul.unlock();
                         command->mStatus = af->releaseAudioPatch(data->mHandle);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case UPDATE_AUDIOPORT_LIST: {
@@ -1999,9 +1998,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnAudioPortListUpdate();
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case UPDATE_AUDIOPATCH_LIST: {
                     ALOGV("AudioCommandThread() processing update audio patch list");
@@ -2009,9 +2008,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnAudioPatchListUpdate();
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case CHANGED_AUDIOVOLUMEGROUP: {
                     AudioVolumeGroupData *data =
@@ -2021,9 +2020,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnAudioVolumeGroupChanged(data->mGroup, data->mFlags);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case SET_AUDIOPORT_CONFIG: {
                     SetAudioPortConfigData *data = (SetAudioPortConfigData *)command->mParam.get();
@@ -2032,9 +2031,9 @@
                     if (af == 0) {
                         command->mStatus = PERMISSION_DENIED;
                     } else {
-                        mLock.unlock();
+                        ul.unlock();
                         command->mStatus = af->setAudioPortConfig(&data->mConfig);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case DYN_POLICY_MIX_STATE_UPDATE: {
@@ -2046,9 +2045,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnDynamicPolicyMixStateUpdate(data->mRegId, data->mState);
-                    mLock.lock();
+                    ul.lock();
                     } break;
                 case RECORDING_CONFIGURATION_UPDATE: {
                     RecordingConfigurationUpdateData *data =
@@ -2058,21 +2057,21 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnRecordingConfigurationUpdate(data->mEvent, &data->mClientInfo,
                             &data->mClientConfig, data->mClientEffects,
                             &data->mDeviceConfig, data->mEffects,
                             data->mPatchHandle, data->mSource);
-                    mLock.lock();
+                    ul.lock();
                     } break;
                 case SET_EFFECT_SUSPENDED: {
                     SetEffectSuspendedData *data = (SetEffectSuspendedData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set effect suspended");
                     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
                     if (af != 0) {
-                        mLock.unlock();
+                        ul.unlock();
                         af->setEffectSuspended(data->mEffectId, data->mSessionId, data->mSuspended);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case AUDIO_MODULES_UPDATE: {
@@ -2081,9 +2080,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnNewAudioModulesAvailable();
-                    mLock.lock();
+                    ul.lock();
                     } break;
                 case ROUTING_UPDATED: {
                     ALOGV("AudioCommandThread() processing routing update");
@@ -2091,9 +2090,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnRoutingUpdated();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case UPDATE_UID_STATES: {
@@ -2102,9 +2101,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->updateUidStates();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case CHECK_SPATIALIZER_OUTPUT: {
@@ -2113,9 +2112,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnCheckSpatializer();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case UPDATE_ACTIVE_SPATIALIZER_TRACKS: {
@@ -2124,9 +2123,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnUpdateActiveSpatializerTracks();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case VOL_RANGE_INIT_REQUEST: {
@@ -2135,28 +2134,28 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnVolumeRangeInitRequest();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
                 {
-                    Mutex::Autolock _l(command->mLock);
+                    audio_utils::lock_guard _l(command->mMutex);
                     if (command->mWaitStatus) {
                         command->mWaitStatus = false;
-                        command->mCond.signal();
+                        command->mCond.notify_one();
                     }
                 }
                 waitTime = -1;
-                // release mLock before releasing strong reference on the service as
+                // release mMutex (via ul) before releasing strong reference on the service as
                 // AudioPolicyService destructor calls AudioCommandThread::exit() which
-                // acquires mLock.
-                mLock.unlock();
+                // acquires ul.
+                ul.unlock();
                 svc.clear();
-                mLock.lock();
+                ul.lock();
             } else {
                 waitTime = mAudioCommands[0]->mTime - curTime;
                 break;
@@ -2174,9 +2173,10 @@
         if (!exitPending()) {
             ALOGV("AudioCommandThread() going to sleep");
             if (waitTime == -1) {
-                mWaitWorkCV.wait(mLock);
+                mWaitWorkCV.wait(ul);
             } else {
-                mWaitWorkCV.waitRelative(mLock, waitTime);
+                // discard return value.
+                mWaitWorkCV.wait_for(ul, std::chrono::nanoseconds(waitTime));
             }
         }
     }
@@ -2184,17 +2184,17 @@
     if (!mAudioCommands.isEmpty()) {
         release_wake_lock(mName.c_str());
     }
-    mLock.unlock();
     return false;
 }
 
 status_t AudioPolicyService::AudioCommandThread::dump(int fd)
+NO_THREAD_SAFETY_ANALYSIS  // mMutex.try_lock() with timeout below is not modeled by the analysis.
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
 
-    const bool locked = dumpTryLock(mLock);
+    const bool locked = mMutex.try_lock(kDumpLockTimeoutNs);
     if (!locked) {
         String8 result2(kCmdDeadlockedString);
         write(fd, result2.c_str(), result2.size());
@@ -2217,7 +2217,7 @@
 
     write(fd, result.c_str(), result.size());
 
-    dumpReleaseLock(mLock, locked);
+    dumpReleaseLock(mMutex, locked);
 
     return NO_ERROR;
 }
@@ -2475,14 +2475,15 @@
 status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
 {
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         insertCommand_l(command, delayMs);
-        mWaitWorkCV.signal();
+        mWaitWorkCV.notify_one();
     }
-    Mutex::Autolock _l(command->mLock);
+    audio_utils::unique_lock ul(command->mMutex);
     while (command->mWaitStatus) {
         nsecs_t timeOutNs = kAudioCommandTimeoutNs + milliseconds(delayMs);
-        if (command->mCond.waitRelative(command->mLock, timeOutNs) != NO_ERROR) {
+        if (command->mCond.wait_for(
+                ul, std::chrono::nanoseconds(timeOutNs), getTid()) == std::cv_status::timeout) {
             command->mStatus = TIMED_OUT;
             command->mWaitStatus = false;
         }
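
sendCommand() above shows the condition-variable half of the migration: Condition::signal()/waitRelative() become notify_one() and a wait_for() whose expiry is reported through std::cv_status. The same enqueue, notify, then wait-with-timeout shape reduced to the standard library; Request, kTimeout, waitForCompletion and the -1 status are hypothetical:

// Sketch: wait for a queued request to complete, giving up after a timeout.
#include <chrono>
#include <condition_variable>
#include <mutex>

struct Request {
    std::mutex mutex;
    std::condition_variable cond;
    bool pending = true;  // cleared by the worker thread, or on timeout below
    int status = 0;
};

constexpr auto kTimeout = std::chrono::seconds(5);

int waitForCompletion(Request& req) {
    std::unique_lock<std::mutex> ul(req.mutex);
    while (req.pending) {
        if (req.cond.wait_for(ul, kTimeout) == std::cv_status::timeout) {
            req.status = -1;      // stand-in for a TIMED_OUT status
            req.pending = false;
        }
    }
    return req.status;
}
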
@@ -2490,7 +2491,7 @@
     return command->mStatus;
 }
 
-// insertCommand_l() must be called with mLock held
+// insertCommand_l() must be called with mMutex held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(sp<AudioCommand>& command, int delayMs)
 {
     ssize_t i;  // not size_t because i will count down to -1
@@ -2678,9 +2679,9 @@
 {
     ALOGV("AudioCommandThread::exit");
     {
-        AutoMutex _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         requestExit();
-        mWaitWorkCV.signal();
+        mWaitWorkCV.notify_one();
     }
     // Note that we can call it from the thread loop if all other references have been released
     // but it will safely return WOULD_BLOCK in this case
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index aaf0b1b..bd56366 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -20,6 +20,7 @@
 #include <android/media/BnAudioPolicyService.h>
 #include <android/media/GetSpatializerResponse.h>
 #include <android-base/thread_annotations.h>
+#include <audio_utils/mutex.h>
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
 #include <cutils/compiler.h>
@@ -310,6 +311,8 @@
     binder::Status clearPreferredMixerAttributes(const media::audio::common::AudioAttributes& attr,
                                                  int32_t portId,
                                                  int32_t uid) override;
+    binder::Status getRegisteredPolicyMixes(
+            std::vector <::android::media::AudioMix>* mixes) override;
 
     status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
 
@@ -387,10 +390,10 @@
      * by audio policy manager and attach/detach the spatializer effect accordingly.
      */
     void onCheckSpatializer() override;
-    void onCheckSpatializer_l() REQUIRES(mLock);
+    void onCheckSpatializer_l() REQUIRES(mMutex);
     void doOnCheckSpatializer();
 
-    void onUpdateActiveSpatializerTracks_l() REQUIRES(mLock);
+    void onUpdateActiveSpatializerTracks_l() REQUIRES(mMutex);
     void doOnUpdateActiveSpatializerTracks();
 
 
@@ -402,14 +405,14 @@
                         AudioPolicyService() ANDROID_API;
     virtual             ~AudioPolicyService();
 
-            status_t dumpInternals(int fd) REQUIRES(mLock);
+    status_t dumpInternals(int fd) REQUIRES(mMutex);
 
     // Handles binder shell commands
     virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
 
 
     // Sets whether the given UID records only silence
-    virtual void setAppState_l(sp<AudioRecordClient> client, app_state_t state) REQUIRES(mLock);
+    virtual void setAppState_l(sp<AudioRecordClient> client, app_state_t state) REQUIRES(mMutex);
 
     // Overrides the UID state as if it is idle
     status_t handleSetUidState(Vector<String16>& args, int err);
@@ -435,9 +438,9 @@
                            const AttributionSourceState& attributionSource);
 
     void updateUidStates();
-    void updateUidStates_l() REQUIRES(mLock);
+    void updateUidStates_l() REQUIRES(mMutex);
 
-    void silenceAllRecordings_l() REQUIRES(mLock);
+    void silenceAllRecordings_l() REQUIRES(mMutex);
 
     static bool isVirtualSource(audio_source_t source);
 
@@ -510,11 +513,11 @@
         void checkRegistered();
 
         wp<AudioPolicyService> mService;
-        Mutex mLock;
+        audio_utils::mutex mMutex{audio_utils::MutexOrder::kUidPolicy_Mutex};
         ActivityManager mAm;
         bool mObserverRegistered = false;
-        std::unordered_map<uid_t, std::pair<bool, int>> mOverrideUids GUARDED_BY(mLock);
-        std::unordered_map<uid_t, std::pair<bool, int>> mCachedUids GUARDED_BY(mLock);
+        std::unordered_map<uid_t, std::pair<bool, int>> mOverrideUids GUARDED_BY(mMutex);
+        std::unordered_map<uid_t, std::pair<bool, int>> mCachedUids GUARDED_BY(mMutex);
         std::vector<uid_t> mAssistantUids;
         std::vector<uid_t> mActiveAssistantUids;
         std::vector<uid_t> mA11yUids;
@@ -539,6 +542,10 @@
             binder::Status onSensorPrivacyChanged(int toggleType, int sensor,
                                                   bool enabled);
 
+            binder::Status onSensorPrivacyStateChanged(int, int, int) {
+                return binder::Status::ok();
+            }
+
         private:
             wp<AudioPolicyService> mService;
             std::atomic_bool mSensorPrivacyEnabled = false;
@@ -641,8 +648,8 @@
 
             int mCommand;   // SET_VOLUME, SET_PARAMETERS...
             nsecs_t mTime;  // time stamp
-            Mutex mLock;    // mutex associated to mCond
-            Condition mCond; // condition for status return
+            audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioCommand_Mutex};
+            audio_utils::condition_variable mCond; // condition for status return
             status_t mStatus; // command status
             bool mWaitStatus; // true if caller is waiting for status
             sp<AudioCommandData> mParam;     // command specific parameter data
@@ -730,8 +737,8 @@
             bool mSuspended;
         };
 
-        Mutex   mLock;
-        Condition mWaitWorkCV;
+        mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kCommandThread_Mutex};
+        audio_utils::condition_variable mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
         sp<AudioCommand> mLastCommand;      // last processed command (used by dump)
         String8 mName;                      // string used by wake lock for delayed commands
@@ -996,12 +1003,12 @@
      * @return the number of active tracks.
      */
     size_t countActiveClientsOnOutput_l(
-        audio_io_handle_t output, bool spatializedOnly = true) REQUIRES(mLock);
+            audio_io_handle_t output, bool spatializedOnly = true) REQUIRES(mMutex);
 
-    mutable Mutex mLock;    // prevents concurrent access to AudioPolicy manager functions changing
-                            // device connection state  or routing
-    // Note: lock acquisition order is always mLock > mEffectsLock:
-    // mLock protects AudioPolicyManager methods that can call into audio flinger
+    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioPolicyService_Mutex};
+    // prevents concurrent access to AudioPolicy manager functions changing
+    // device connection state or routing.
+    // mMutex protects AudioPolicyManager methods that can call into audio flinger
     // and possibly back in to audio policy service and acquire mEffectsLock.
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
@@ -1009,29 +1016,30 @@
     AudioPolicyClient *mAudioPolicyClient;
     std::vector<audio_usage_t> mSupportedSystemUsages;
 
-    Mutex mNotificationClientsLock;
+    mutable audio_utils::mutex mNotificationClientsMutex{
+            audio_utils::MutexOrder::kAudioPolicyService_NotificationClientsMutex};
     DefaultKeyedVector<int64_t, sp<NotificationClient>> mNotificationClients
-        GUARDED_BY(mNotificationClientsLock);
+            GUARDED_BY(mNotificationClientsMutex);
     // Manage all effects configured in audio_effects.conf
-    // never hold AudioPolicyService::mLock when calling AudioPolicyEffects methods as
+    // never hold AudioPolicyService::mMutex when calling AudioPolicyEffects methods as
     // those can call back into AudioPolicyService methods and try to acquire the mutex
-    sp<AudioPolicyEffects> mAudioPolicyEffects GUARDED_BY(mLock);
-    audio_mode_t mPhoneState GUARDED_BY(mLock);
-    uid_t mPhoneStateOwnerUid GUARDED_BY(mLock);
+    sp<AudioPolicyEffects> mAudioPolicyEffects GUARDED_BY(mMutex);
+    audio_mode_t mPhoneState GUARDED_BY(mMutex);
+    uid_t mPhoneStateOwnerUid GUARDED_BY(mMutex);
 
-    sp<UidPolicy> mUidPolicy GUARDED_BY(mLock);
-    sp<SensorPrivacyPolicy> mSensorPrivacyPolicy GUARDED_BY(mLock);
+    sp<UidPolicy> mUidPolicy GUARDED_BY(mMutex);
+    sp<SensorPrivacyPolicy> mSensorPrivacyPolicy GUARDED_BY(mMutex);
 
     DefaultKeyedVector<audio_port_handle_t, sp<AudioRecordClient>> mAudioRecordClients
-        GUARDED_BY(mLock);
+            GUARDED_BY(mMutex);
     DefaultKeyedVector<audio_port_handle_t, sp<AudioPlaybackClient>> mAudioPlaybackClients
-        GUARDED_BY(mLock);
+            GUARDED_BY(mMutex);
 
     MediaPackageManager mPackageManager; // To check allowPlaybackCapture
 
     CaptureStateNotifier mCaptureStateNotifier;
 
-    // created in onFirstRef() and never cleared: does not need to be guarded by mLock
+    // created in onFirstRef() and never cleared: does not need to be guarded by mMutex
     sp<Spatializer> mSpatializer;
 
     void *mLibraryHandle = nullptr;
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 83030a3..dbc48ae 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -31,6 +31,7 @@
 #include <audio_utils/fixedfft.h>
 #include <com_android_media_audio.h>
 #include <cutils/bitops.h>
+#include <cutils/properties.h>
 #include <hardware/sensors.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -229,7 +230,7 @@
         return;
     }
     auto latencyModesStrs = android::sysprop::BluetoothProperties::dsa_transport_preference();
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     // First load preferred low latency modes ordered from the property
     for (auto str : latencyModesStrs) {
         if (!str.has_value()) continue;
@@ -394,7 +395,13 @@
         return status;
     }
     for (const auto channelMask : channelMasks) {
-        if (!audio_is_channel_mask_spatialized(channelMask)) {
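+        // When both the ro.audio.stereo_spatialization_enabled property and the
+        // com.android.media.audio stereo_spatialization flag are enabled, accept any mask
+        // that contains stereo; otherwise keep the stricter
+        // audio_is_channel_mask_spatialized() check.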
+        static const bool stereo_spatialization_enabled =
+                property_get_bool("ro.audio.stereo_spatialization_enabled", false);
+        const bool channel_mask_spatialized =
+                (stereo_spatialization_enabled && com_android_media_audio_stereo_spatialization())
+                ? audio_channel_mask_contains_stereo(channelMask)
+                : audio_is_channel_mask_spatialized(channelMask);
+        if (!channel_mask_spatialized) {
             ALOGW("%s: ignoring channelMask:%#x", __func__, channelMask);
             continue;
         }
@@ -461,7 +468,7 @@
 
 /** Gets the channel mask, sampling rate and format set for the spatializer input. */
 audio_config_base_t Spatializer::getAudioInConfig() const {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
     // For now use highest supported channel count
     config.channel_mask = getMaxChannelMask(mChannelMasks, FCC_LIMIT);
@@ -470,7 +477,7 @@
 
 status_t Spatializer::registerCallback(
         const sp<media::INativeSpatializerCallback>& callback) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (callback == nullptr) {
         return BAD_VALUE;
     }
@@ -498,7 +505,7 @@
 // IBinder::DeathRecipient
 void Spatializer::binderDied(__unused const wp<IBinder> &who) {
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         mLevel = Spatialization::Level::NONE;
         mSpatializerCallback.clear();
     }
@@ -527,7 +534,7 @@
     sp<media::INativeSpatializerCallback> callback;
     bool levelChanged = false;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         levelChanged = mLevel != level;
         mLevel = level;
         callback = mSpatializerCallback;
@@ -551,7 +558,7 @@
     if (level == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *level = mLevel;
     ALOGV("%s level %d", __func__, (int)*level);
     return Status::ok();
@@ -562,14 +569,14 @@
     if (supports == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *supports = mSupportsHeadTracking;
     return Status::ok();
 }
 
 Status Spatializer::getSupportedHeadTrackingModes(
         std::vector<HeadTracking::Mode>* modes) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     ALOGV("%s", __func__);
     if (modes == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
@@ -585,7 +592,7 @@
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
     mLocalLog.log("%s with %s", __func__, ToString(mode).c_str());
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     switch (mode) {
         case HeadTracking::Mode::OTHER:
             return binderStatusFromStatusT(BAD_VALUE);
@@ -610,7 +617,7 @@
     if (mode == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *mode = mActualHeadTrackingMode;
     ALOGV("%s mode %d", __func__, (int)*mode);
     return Status::ok();
@@ -620,7 +627,7 @@
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mPoseController != nullptr) {
         mPoseController->recenter();
     }
@@ -637,7 +644,7 @@
         ALOGW("Invalid screenToStage vector.");
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mPoseController != nullptr) {
         mLocalLog.log("%s with screenToStage %s", __func__,
                 media::VectorRecorder::toString<float>(screenToStage).c_str());
@@ -650,7 +657,7 @@
     ALOGV("%s", __func__);
     bool levelChanged = false;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         if (mSpatializerCallback == nullptr) {
             return binderStatusFromStatusT(INVALID_OPERATION);
         }
@@ -674,7 +681,7 @@
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mHeadSensor != sensorHandle) {
         mLocalLog.log("%s with 0x%08x", __func__, sensorHandle);
         mHeadSensor = sensorHandle;
@@ -689,7 +696,7 @@
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mScreenSensor != sensorHandle) {
         mLocalLog.log("%s with 0x%08x", __func__, sensorHandle);
         mScreenSensor = sensorHandle;
@@ -708,7 +715,7 @@
     // It is possible due to numerical inaccuracies to exceed the boundaries of 0 to 2 * M_PI.
     ALOGI_IF(angle != physicalToLogicalAngle,
             "%s: clamping %f to %f", __func__, physicalToLogicalAngle, angle);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     mDisplayOrientation = angle;
     if (mPoseController != nullptr) {
         // This turns on the rate-limiter.
@@ -728,7 +735,7 @@
     // It is possible due to numerical inaccuracies to exceed the boundaries of 0 to 2 * M_PI.
     ALOGI_IF(angle != hingeAngle,
             "%s: clamping %f to %f", __func__, hingeAngle, angle);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     mHingeAngle = angle;
     if (mEngine != nullptr) {
         setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{angle});
@@ -739,7 +746,7 @@
 Status Spatializer::setFoldState(bool folded) {
     ALOGV("%s foldState %d", __func__, (int)folded);
     mLocalLog.log("%s with %d", __func__, (int)folded);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     mFoldedState = folded;
     if (mEngine != nullptr) {
         // we don't suppress multiple calls with the same folded state - that's
@@ -761,7 +768,7 @@
 Status Spatializer::registerHeadTrackingCallback(
         const sp<media::ISpatializerHeadTrackingCallback>& callback) {
     ALOGV("%s callback %p", __func__, callback.get());
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
@@ -771,7 +778,7 @@
 
 Status Spatializer::setParameter(int key, const std::vector<unsigned char>& value) {
     ALOGV("%s key %d", __func__, key);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     status_t status = INVALID_OPERATION;
     if (mEngine != nullptr) {
         status = setEffectParameter_l(key, value);
@@ -785,7 +792,7 @@
     if (value == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     status_t status = INVALID_OPERATION;
     if (mEngine != nullptr) {
         ALOGV("%s key %d mEngine %p", __func__, key, mEngine.get());
@@ -799,7 +806,7 @@
     if (output == nullptr) {
         binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *output = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(mOutput));
     ALOGV("%s got output %d", __func__, *output);
     return Status::ok();
@@ -837,7 +844,7 @@
     ALOGV("%s", __func__);
     sp<media::ISpatializerHeadTrackingCallback> callback;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         callback = mHeadTrackingCallback;
         if (mEngine != nullptr) {
             setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
@@ -865,7 +872,7 @@
     sp<media::ISpatializerHeadTrackingCallback> callback;
     HeadTracking::Mode spatializerMode;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         if (!mSupportsHeadTracking) {
             spatializerMode = HeadTracking::Mode::DISABLED;
         } else {
@@ -932,7 +939,7 @@
     sp<media::INativeSpatializerCallback> callback;
 
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
         mLocalLog.log("%s with output %d tracks %zu (mOutput %d)", __func__, (int)output,
                       numActiveTracks, (int)mOutput);
@@ -998,7 +1005,7 @@
     sp<media::INativeSpatializerCallback> callback;
 
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         mLocalLog.log("%s with output %d tracks %zu", __func__, (int)mOutput, mNumActiveTracks);
         ALOGV("%s mOutput %d", __func__, (int)mOutput);
         if (mOutput == AUDIO_IO_HANDLE_NONE) {
@@ -1032,7 +1039,7 @@
 
 void Spatializer::onSupportedLatencyModesChangedMsg(
         audio_io_handle_t output, std::vector<audio_latency_mode_t>&& modes) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     ALOGV("%s output %d mOutput %d num modes %zu",
             __func__, (int)output, (int)mOutput, modes.size());
     if (output == mOutput) {
@@ -1043,7 +1050,7 @@
 }
 
 void Spatializer::updateActiveTracks(size_t numActiveTracks) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mNumActiveTracks != numActiveTracks) {
         mLocalLog.log("%s from %zu to %zu", __func__, mNumActiveTracks, numActiveTracks);
         mNumActiveTracks = numActiveTracks;
@@ -1174,7 +1181,7 @@
 
 void Spatializer::calculateHeadPose() {
     ALOGV("%s", __func__);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mPoseController != nullptr) {
         mPoseController->calculateAsync();
     }
@@ -1193,7 +1200,7 @@
     bool needUnlock = false;
 
     prefixSpace += ' ';
-    if (!mLock.try_lock()) {
+    if (!mMutex.try_lock()) {
        // dumpsys: even if try_lock failed, an information dump can still be useful, although it may not be accurate
         ss.append(prefixSpace).append("try_lock failed, dumpsys below maybe INACCURATE!\n");
     } else {
@@ -1239,6 +1246,10 @@
     base::StringAppendF(&ss, "%sDisplayOrientation: %f\n", prefixSpace.c_str(),
                         mDisplayOrientation);
 
+    // 4. Show flag or property state.
+    base::StringAppendF(&ss, "%sStereo Spatialization: %s\n", prefixSpace.c_str(),
+            com_android_media_audio_stereo_spatialization() ? "true" : "false");
+
     ss.append(prefixSpace + "CommandLog:\n");
     ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), mMaxLocalLogLine);
 
@@ -1258,7 +1269,7 @@
     }
 
     if (needUnlock) {
-        mLock.unlock();
+        mMutex.unlock();
     }
     return ss;
 }
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 123517e..24788dc 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -23,6 +23,7 @@
 #include <android/media/audio/common/AudioLatencyMode.h>
 #include <android/media/audio/common/HeadTracking.h>
 #include <android/media/audio/common/Spatialization.h>
+#include <audio_utils/mutex.h>
 #include <audio_utils/SimpleLog.h>
 #include <math.h>
 #include <media/AudioEffect.h>
@@ -148,7 +149,7 @@
 
     /** Level getter for use by local classes. */
     media::audio::common::Spatialization::Level getLevel() const {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         return mLevel;
     }
 
@@ -161,7 +162,7 @@
      */
     audio_io_handle_t detachOutput();
     /** Returns the output stream the spatializer is attached to. */
-    audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
+    audio_io_handle_t getOutput() const { audio_utils::lock_guard lock(mMutex); return mOutput; }
 
     void updateActiveTracks(size_t numActiveTracks);
 
@@ -261,7 +262,7 @@
      *  according to values vector size.
      */
     template<typename T>
-    status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mLock) {
+    status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mMutex) {
         static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must less than 32 bits");
 
         uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values.size()];
@@ -286,7 +287,7 @@
      * The variant is for compound parameters with two values of different base types
      */
     template<typename P1, typename P2>
-    status_t setEffectParameter_l(uint32_t type, const P1 val1, const P2 val2) REQUIRES(mLock) {
+    status_t setEffectParameter_l(uint32_t type, const P1 val1, const P2 val2) REQUIRES(mMutex) {
         static_assert(sizeof(P1) <= sizeof(uint32_t), "The size of P1 must less than 32 bits");
         static_assert(sizeof(P2) <= sizeof(uint32_t), "The size of P2 must less than 32 bits");
 
@@ -314,7 +315,7 @@
      * by specifying values vector size.
      */
     template<typename T>
-    status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mLock) {
+    status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mMutex) {
         static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must less than 32 bits");
 
         uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values->size()];
@@ -345,7 +346,7 @@
      * The variant is for compound parameters with two values of different base types
      */
     template<typename P1, typename P2>
-    status_t getEffectParameter_l(uint32_t type, P1 *val1, P2 *val2) REQUIRES(mLock) {
+    status_t getEffectParameter_l(uint32_t type, P1 *val1, P2 *val2) REQUIRES(mMutex) {
         static_assert(sizeof(P1) <= sizeof(uint32_t), "The size of P1 must less than 32 bits");
         static_assert(sizeof(P2) <= sizeof(uint32_t), "The size of P2 must less than 32 bits");
 
@@ -375,25 +376,25 @@
      * spatializer state and playback activity and configures the pose controller
      * accordingly.
      */
-    void checkSensorsState_l() REQUIRES(mLock);
+    void checkSensorsState_l() REQUIRES(mMutex);
 
     /**
      * Checks if the head pose controller should be created or destroyed according
      * to desired head tracking mode.
      */
-    void checkPoseController_l() REQUIRES(mLock);
+    void checkPoseController_l() REQUIRES(mMutex);
 
     /**
      * Checks if the spatializer effect should be enabled based on
      * playback activity and requested level.
      */
-    void checkEngineState_l() REQUIRES(mLock);
+    void checkEngineState_l() REQUIRES(mMutex);
 
     /**
      * Reset head tracking mode and recenter pose in engine: Called when the head tracking
      * is disabled.
      */
-    void resetEngineHeadPose_l() REQUIRES(mLock);
+    void resetEngineHeadPose_l() REQUIRES(mMutex);
 
     /** Read bluetooth.core.le.dsa_transport_preference property and populate the ordered list of
      * preferred low latency modes in mOrderedLowLatencyModes.
@@ -406,7 +407,7 @@
      * Note: Because MODE_FREE is not in mOrderedLowLatencyModes, it will always be at
      * the end of the list.
      */
-    void sortSupportedLatencyModes_l() REQUIRES(mLock);
+    void sortSupportedLatencyModes_l() REQUIRES(mMutex);
 
     /**
      * Called after enabling head tracking in the spatializer engine to indicate which
@@ -415,14 +416,14 @@
      * When the connection mode is direct to the sensor, the sensor ID is also communicated
      * to the spatializer engine.
      */
-    void setEngineHeadtrackingConnectionMode_l() REQUIRES(mLock);
+    void setEngineHeadtrackingConnectionMode_l() REQUIRES(mMutex);
 
     /**
      * Select the desired head tracking connection mode for the spatializer engine among the list
      * stored in mSupportedHeadtrackingConnectionModes at init time.
      * Also returns the desired low latency mode according to selected connection mode.
      */
-    audio_latency_mode_t selectHeadtrackingConnectionMode_l() REQUIRES(mLock);
+    audio_latency_mode_t selectHeadtrackingConnectionMode_l() REQUIRES(mMutex);
 
     /** Effect engine descriptor */
     const effect_descriptor_t mEngineDescriptor;
@@ -435,48 +436,48 @@
     const std::string mMetricsId = kDefaultMetricsId;
 
     /** Mutex protecting internal state */
-    mutable std::mutex mLock;
+    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kSpatializer_Mutex};
 
     /** Client AudioEffect for the engine */
-    sp<AudioEffect> mEngine GUARDED_BY(mLock);
+    sp<AudioEffect> mEngine GUARDED_BY(mMutex);
     /** Output stream the spatializer mixer thread is attached to */
-    audio_io_handle_t mOutput GUARDED_BY(mLock) = AUDIO_IO_HANDLE_NONE;
+    audio_io_handle_t mOutput GUARDED_BY(mMutex) = AUDIO_IO_HANDLE_NONE;
 
     /** Callback interface to the client (AudioService) controlling this Spatializer */
-    sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mLock);
+    sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mMutex);
 
     /** Callback interface for head tracking */
-    sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mLock);
+    sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mMutex);
 
     /** Requested spatialization level */
-    media::audio::common::Spatialization::Level mLevel GUARDED_BY(mLock) =
+    media::audio::common::Spatialization::Level mLevel GUARDED_BY(mMutex) =
             media::audio::common::Spatialization::Level::NONE;
 
     /** Control logic for head-tracking, etc. */
-    std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mLock);
+    std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mMutex);
 
     /** Last requested head tracking mode */
-    media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mLock)
+    media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mMutex)
             = media::HeadTrackingMode::STATIC;
 
     /** Last-reported actual head-tracking mode. */
-    media::audio::common::HeadTracking::Mode mActualHeadTrackingMode GUARDED_BY(mLock)
+    media::audio::common::HeadTracking::Mode mActualHeadTrackingMode GUARDED_BY(mMutex)
             = media::audio::common::HeadTracking::Mode::DISABLED;
 
     /** Selected Head pose sensor */
-    int32_t mHeadSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+    int32_t mHeadSensor GUARDED_BY(mMutex) = SpatializerPoseController::INVALID_SENSOR;
 
     /** Selected Screen pose sensor */
-    int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+    int32_t mScreenSensor GUARDED_BY(mMutex) = SpatializerPoseController::INVALID_SENSOR;
 
     /** Last display orientation received */
-    float mDisplayOrientation GUARDED_BY(mLock) = 0.f;  // aligned to natural up orientation.
+    float mDisplayOrientation GUARDED_BY(mMutex) = 0.f;  // aligned to natural up orientation.
 
     /** Last folded state */
-    bool mFoldedState GUARDED_BY(mLock) = false;  // foldable: true means folded.
+    bool mFoldedState GUARDED_BY(mMutex) = false;  // foldable: true means folded.
 
     /** Last hinge angle */
-    float mHingeAngle GUARDED_BY(mLock) = 0.f;  // foldable: 0.f is closed, M_PI flat open.
+    float mHingeAngle GUARDED_BY(mMutex) = 0.f;  // foldable: 0.f is closed, M_PI flat open.
 
     std::vector<media::audio::common::Spatialization::Level> mLevels;
     std::vector<media::audio::common::HeadTracking::Mode> mHeadTrackingModes;
@@ -497,8 +498,8 @@
     sp<ALooper> mLooper;
     sp<EngineCallbackHandler> mHandler;
 
-    size_t mNumActiveTracks GUARDED_BY(mLock) = 0;
-    std::vector<audio_latency_mode_t> mSupportedLatencyModes GUARDED_BY(mLock);
+    size_t mNumActiveTracks GUARDED_BY(mMutex) = 0;
+    std::vector<audio_latency_mode_t> mSupportedLatencyModes GUARDED_BY(mMutex);
     /** preference order for low latency modes according to persist.bluetooth.hid.transport */
     std::vector<audio_latency_mode_t> mOrderedLowLatencyModes;
     /** string to latency mode map used to parse bluetooth.core.le.dsa_transport_preference */
@@ -514,10 +515,10 @@
      * Dump to local log with max/average pose angle every mPoseRecordThreshold.
      */
     // Record one log line per second (up to mMaxLocalLogLine) to capture most recent sensor data.
-    media::VectorRecorder mPoseRecorder GUARDED_BY(mLock) {
+    media::VectorRecorder mPoseRecorder GUARDED_BY(mMutex) {
         6 /* vectorSize */, std::chrono::seconds(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
     // Record one log line per minute (up to mMaxLocalLogLine) to capture durable sensor data.
-    media::VectorRecorder mPoseDurableRecorder  GUARDED_BY(mLock) {
+    media::VectorRecorder mPoseDurableRecorder GUARDED_BY(mMutex) {
         6 /* vectorSize */, std::chrono::minutes(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
 };  // Spatializer
 
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index a4a0cd4..34bd3b4 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -33,11 +34,14 @@
         "libutils",
         "libcutils",
         "libxml2",
+        "server_configurable_flags",
     ],
 
     static_libs: [
+        "android.media.audiopolicy-aconfig-cc",
         "audioclient-types-aidl-cpp",
         "libaudiopolicycomponents",
+        "libflagtest",
         "libgmock",
     ],
 
@@ -49,7 +53,7 @@
 
     srcs: ["audiopolicymanager_tests.cpp"],
 
-    data: [":audiopolicytest_configuration_files",],
+    data: [":audiopolicytest_configuration_files"],
 
     cflags: [
         "-Werror",
@@ -63,7 +67,6 @@
 
 }
 
-
 cc_test {
     name: "audio_health_tests",
 
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index 31ee252..aa7c9cd 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -31,6 +31,7 @@
     using AudioPolicyManager::getConfig;
     using AudioPolicyManager::initialize;
     using AudioPolicyManager::getOutputs;
+    using AudioPolicyManager::getInputs;
     using AudioPolicyManager::getAvailableOutputDevices;
     using AudioPolicyManager::getAvailableInputDevices;
     using AudioPolicyManager::setSurroundFormatEnabled;
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 74d3474..e883e10 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -28,6 +28,8 @@
 #include <android-base/file.h>
 #include <android-base/properties.h>
 #include <android/content/AttributionSourceState.h>
+#include <android_media_audiopolicy.h>
+#include <flag_macros.h>
 #include <hardware/audio_effect.h>
 #include <media/AudioPolicy.h>
 #include <media/PatchBuilder.h>
@@ -43,6 +45,7 @@
 
 using namespace android;
 using testing::UnorderedElementsAre;
+using testing::IsEmpty;
 using android::content::AttributionSourceState;
 
 namespace {
@@ -92,6 +95,12 @@
     return attributionSourceState;
 }
 
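+// Field-wise comparison helper: audio_config_base_t is a plain C struct with no operator==,
+// so compare format, sample rate, and channel mask directly.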
+bool equals(const audio_config_base_t& config1, const audio_config_base_t& config2) {
+    return config1.format == config2.format
+            && config1.sample_rate == config2.sample_rate
+            && config1.channel_mask == config2.channel_mask;
+}
+
 } // namespace
 
 TEST(AudioPolicyConfigTest, DefaultConfigForTestsIsEmpty) {
@@ -1266,6 +1275,53 @@
                                                            "", "", AUDIO_FORMAT_LDAC));
 }
 
+TEST_F(AudioPolicyManagerTestWithConfigurationFile, PreferExactConfigForInput) {
+    const audio_channel_mask_t deviceChannelMask = AUDIO_CHANNEL_IN_3POINT1;
+    mClient->addSupportedFormat(AUDIO_FORMAT_PCM_16_BIT);
+    mClient->addSupportedChannelMask(deviceChannelMask);
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(AUDIO_DEVICE_IN_USB_DEVICE,
+                                                           AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                                                           "", "", AUDIO_FORMAT_DEFAULT));
+
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                               AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE, ""};
+    AudioPolicyInterface::input_type_t inputType;
+    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+    AttributionSourceState attributionSource = createAttributionSourceState(/*uid=*/ 0);
+    audio_config_base_t requestedConfig = {
+            .channel_mask = AUDIO_CHANNEL_IN_STEREO,
+            .format = AUDIO_FORMAT_PCM_16_BIT,
+            .sample_rate = 48000
+    };
+    audio_config_base_t config = requestedConfig;
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    ASSERT_EQ(OK, mManager->getInputForAttr(
+            &attr, &input, 1 /*riid*/, AUDIO_SESSION_NONE, attributionSource, &config,
+            AUDIO_INPUT_FLAG_NONE,
+            &selectedDeviceId, &inputType, &portId));
+    ASSERT_NE(AUDIO_PORT_HANDLE_NONE, portId);
+    ASSERT_TRUE(equals(requestedConfig, config));
+
+    attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+            AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE, ""};
+    requestedConfig.channel_mask = deviceChannelMask;
+    config = requestedConfig;
+    selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    input = AUDIO_IO_HANDLE_NONE;
+    portId = AUDIO_PORT_HANDLE_NONE;
+    ASSERT_EQ(OK, mManager->getInputForAttr(
+            &attr, &input, 1 /*riid*/, AUDIO_SESSION_NONE, attributionSource, &config,
+            AUDIO_INPUT_FLAG_NONE,
+            &selectedDeviceId, &inputType, &portId));
+    ASSERT_NE(AUDIO_PORT_HANDLE_NONE, portId);
+    ASSERT_TRUE(equals(requestedConfig, config));
+
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(AUDIO_DEVICE_IN_USB_DEVICE,
+                                                           AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                                           "", "", AUDIO_FORMAT_DEFAULT));
+}
+
 class AudioPolicyManagerTestDynamicPolicy : public AudioPolicyManagerTestWithConfigurationFile {
 protected:
     void TearDown() override;
@@ -1273,6 +1329,8 @@
     status_t addPolicyMix(int mixType, int mixFlag, audio_devices_t deviceType,
             std::string mixAddress, const audio_config_t& audioConfig,
             const std::vector<AudioMixMatchCriterion>& matchCriteria);
+
+    std::vector<AudioMix> getRegisteredPolicyMixes();
     void clearPolicyMix();
     void addPolicyMixAndStartInputForLoopback(
             int mixType, int mixFlag, audio_devices_t deviceType, std::string mixAddress,
@@ -1317,6 +1375,15 @@
     return ret;
 }
 
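+// Test helper: fetches the mixes currently registered with the manager and expects the
+// query itself to succeed whenever a manager instance exists.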
+std::vector<AudioMix> AudioPolicyManagerTestDynamicPolicy::getRegisteredPolicyMixes() {
+    std::vector<AudioMix> audioMixes;
+    if (mManager != nullptr) {
+        status_t ret = mManager->getRegisteredPolicyMixes(audioMixes);
+        EXPECT_EQ(NO_ERROR, ret);
+    }
+    return audioMixes;
+}
+
 void AudioPolicyManagerTestDynamicPolicy::clearPolicyMix() {
     if (mManager != nullptr) {
         mManager->stopInput(mLoopbackInputPortId);
@@ -1470,6 +1537,50 @@
     ASSERT_EQ(INVALID_OPERATION, ret);
 }
 
+TEST_F_WITH_FLAGS(
+        AudioPolicyManagerTestDynamicPolicy,
+        GetRegisteredPolicyMixes,
+        REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api))
+) {
+    std::vector<AudioMix> mixes = getRegisteredPolicyMixes();
+    EXPECT_THAT(mixes, IsEmpty());
+}
+
+TEST_F_WITH_FLAGS(AudioPolicyManagerTestDynamicPolicy,
+        AddPolicyMixAndVerifyGetRegisteredPolicyMixes,
+        REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api))
+) {
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = k48000SamplingRate;
+
+    std::vector<AudioMixMatchCriterion> mixMatchCriteria = {
+            createUidCriterion(/*uid=*/42),
+            createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/true)};
+    status_t ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+                                AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig,
+                                mixMatchCriteria);
+    ASSERT_EQ(NO_ERROR, ret);
+
+    std::vector<AudioMix> mixes = getRegisteredPolicyMixes();
+    ASSERT_EQ(mixes.size(), 1);
+
+    const AudioMix& mix = mixes[0];
+    ASSERT_EQ(mix.mCriteria.size(), mixMatchCriteria.size());
+    for (uint32_t i = 0; i < mixMatchCriteria.size(); i++) {
+        EXPECT_EQ(mix.mCriteria[i].mRule, mixMatchCriteria[i].mRule);
+        EXPECT_EQ(mix.mCriteria[i].mValue.mUsage, mixMatchCriteria[i].mValue.mUsage);
+    }
+    EXPECT_EQ(mix.mDeviceType, AUDIO_DEVICE_OUT_REMOTE_SUBMIX);
+    EXPECT_EQ(mix.mRouteFlags, MIX_ROUTE_FLAG_LOOP_BACK);
+    EXPECT_EQ(mix.mMixType, MIX_TYPE_PLAYERS);
+    EXPECT_EQ(mix.mFormat.channel_mask, audioConfig.channel_mask);
+    EXPECT_EQ(mix.mFormat.format, audioConfig.format);
+    EXPECT_EQ(mix.mFormat.sample_rate, audioConfig.sample_rate);
+    EXPECT_EQ(mix.mFormat.frame_count, audioConfig.frame_count);
+}
+
 class AudioPolicyManagerTestForHdmi
         : public AudioPolicyManagerTestWithConfigurationFile,
           public testing::WithParamInterface<audio_format_t> {
diff --git a/services/audiopolicy/tests/resources/Android.bp b/services/audiopolicy/tests/resources/Android.bp
index 5e71210..43e2e39 100644
--- a/services/audiopolicy/tests/resources/Android.bp
+++ b/services/audiopolicy/tests/resources/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
index 4efdf8a..1a299c6 100644
--- a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -65,6 +65,7 @@
                         samplingRates="48000"
                         channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
                 </mixPort>
+                <mixPort name="hifi_input" role="sink" />
             </mixPorts>
             <devicePorts>
                 <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
@@ -111,6 +112,8 @@
                        sources="primary output,hifi_output,mmap_no_irq_out"/>
                 <route type="mix" sink="mixport_bus_input"
                     sources="BUS Device In"/>
+                <route type="mix" sink="hifi_input"
+                        sources="USB Device In" />
             </routes>
         </module>
 
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 4883a09..5b76bb0 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -101,7 +101,7 @@
         "android.frameworks.cameraservice.device-V2-ndk",
         "android.hardware.camera.common-V1-ndk",
         "android.hardware.camera.device-V3-ndk",
-        "android.hardware.camera.metadata-V2-ndk",
+        "android.hardware.camera.metadata-V3-ndk",
         "android.hardware.camera.provider@2.4",
         "android.hardware.camera.provider@2.5",
         "android.hardware.camera.provider@2.6",
@@ -112,6 +112,7 @@
         "libcameraservice_device_independent",
         "libdynamic_depth",
         "libprocessinfoservice_aidl",
+        "libvirtualdevicebuildflags",
         "media_permission-aidl-cpp",
     ],
 }
@@ -195,6 +196,7 @@
         "utils/SessionStatsBuilder.cpp",
         "utils/TagMonitor.cpp",
         "utils/LatencyHistogram.cpp",
+        "utils/Utils.cpp",
     ],
 
     header_libs: [
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 725f1eb..7202532 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -135,6 +135,8 @@
 static const std::string sSystemCameraPermission("android.permission.SYSTEM_CAMERA");
 static const std::string sCameraHeadlessSystemUserPermission(
         "android.permission.CAMERA_HEADLESS_SYSTEM_USER");
+static const std::string sCameraPrivacyAllowlistPermission(
+        "android.permission.CAMERA_PRIVACY_ALLOWLIST");
 static const std::string
         sCameraSendSystemEventsPermission("android.permission.CAMERA_SEND_SYSTEM_EVENTS");
 static const std::string sCameraOpenCloseListenerPermission(
@@ -820,6 +822,14 @@
             std::string(), AppOpsManager::OP_NONE);
 }
 
+bool CameraService::hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const {
+    AttributionSourceState attributionSource{};
+    attributionSource.pid = callingPid;
+    attributionSource.uid = callingUid;
+    return checkPermission(std::string(), sCameraPrivacyAllowlistPermission, attributionSource,
+            std::string(), AppOpsManager::OP_NONE);
+}
+
 Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
     ATRACE_CALL();
     Mutex::Autolock l(mServiceLock);
@@ -906,13 +916,6 @@
                 "request for system only device %s: ", cameraId.c_str());
     }
 
-    // Check for camera permissions
-    if (!hasCameraPermissions()) {
-        return STATUS_ERROR(ERROR_PERMISSION_DENIED,
-                "android.permission.CAMERA needed to call"
-                "createDefaultRequest");
-    }
-
     CameraMetadata metadata;
     status_t err = mCameraProviderManager->createDefaultRequest(cameraId, tempId, &metadata);
     if (err == OK) {
@@ -961,13 +964,6 @@
                 cameraId.c_str());
     }
 
-    // Check for camera permissions
-    if (!hasCameraPermissions()) {
-        return STATUS_ERROR(ERROR_PERMISSION_DENIED,
-                "android.permission.CAMERA needed to call"
-                "isSessionConfigurationWithParametersSupported");
-    }
-
     *supported = false;
     status_t ret = mCameraProviderManager->isSessionConfigurationSupported(cameraId.c_str(),
             sessionConfiguration, /*mOverrideForPerfClass*/false, /*checkSessionParams*/true,
@@ -998,6 +994,61 @@
     return res;
 }
 
+Status CameraService::getSessionCharacteristics(const std::string& unresolvedCameraId,
+                                                int targetSdkVersion, bool overrideToPortrait,
+                                                const SessionConfiguration& sessionConfiguration,
+                                                /*out*/ CameraMetadata* outMetadata) {
+    ATRACE_CALL();
+
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
+        logServiceError("Camera subsystem is not available", ERROR_DISCONNECTED);
+        return STATUS_ERROR(ERROR_DISCONNECTED, "Camera subsystem is not available");
+    }
+
+    const std::string cameraId =
+            resolveCameraId(unresolvedCameraId, CameraThreadState::getCallingUid());
+
+    if (outMetadata == nullptr) {
+        std::string msg =
+                fmt::sprintf("Camera %s: Invalid 'outMetadata' input!", unresolvedCameraId.c_str());
+        ALOGE("%s: %s", __FUNCTION__, msg.c_str());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.c_str());
+    }
+
+    bool overrideForPerfClass = SessionConfigurationUtils::targetPerfClassPrimaryCamera(
+            mPerfClassPrimaryCameraIds, cameraId, targetSdkVersion);
+
+    status_t ret = mCameraProviderManager->getSessionCharacteristics(
+            cameraId, sessionConfiguration, overrideForPerfClass, overrideToPortrait, outMetadata);
+
+    // TODO(b/303645857): Remove fingerprintable metadata if the caller process does not have
+    //                    camera access permission.
+
+    Status res = Status::ok();
+    switch (ret) {
+        case OK:
+            // Expected, no handling needed.
+            break;
+        case INVALID_OPERATION: {
+                std::string msg = fmt::sprintf(
+                        "Camera %s: Session characteristics query not supported!",
+                        cameraId.c_str());
+                ALOGD("%s: %s", __FUNCTION__, msg.c_str());
+                res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.c_str());
+            }
+            break;
+        default: {
+                std::string msg = fmt::sprintf("Camera %s: Error: %s (%d)", cameraId.c_str(),
+                                               strerror(-ret), ret);
+                ALOGE("%s: %s", __FUNCTION__, msg.c_str());
+                res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.c_str());
+            }
+    }
+
+    return res;
+}
+
 Status CameraService::parseCameraIdRemapping(
         const hardware::CameraIdRemapping& cameraIdRemapping,
         /* out */ TCameraIdRemapping* cameraIdRemappingMap) {
@@ -2345,6 +2396,39 @@
     return ret;
 }
 
+bool CameraService::isCameraPrivacyEnabled(const String16& packageName, const std::string& cam_id,
+        int callingPid, int callingUid) {
+    if (!isAutomotiveDevice()) {
+        return mSensorPrivacyPolicy->isCameraPrivacyEnabled();
+    }
+
+    // Automotive privileged clients (AID_AUTOMOTIVE_EVS) using exterior system cameras for
+    // safety-critical use cases cannot be disabled and are exempt from the camera privacy policy.
+    if ((isAutomotivePrivilegedClient(callingUid) && isAutomotiveExteriorSystemCamera(cam_id))) {
+        ALOGI("Camera privacy cannot be enabled for automotive privileged client %d "
+                "using camera %s", callingUid, cam_id.c_str());
+        return false;
+    }
+
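+    // Otherwise check the per-package setting first, then the global camera privacy state;
+    // in the automotive driver-assistance states, only callers holding the
+    // CAMERA_PRIVACY_ALLOWLIST permission are exempt from muting.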
+    if (mSensorPrivacyPolicy->isCameraPrivacyEnabled(packageName)) {
+        return true;
+    } else if (mSensorPrivacyPolicy->getCameraPrivacyState() == SensorPrivacyManager::DISABLED) {
+        return false;
+    } else if ((mSensorPrivacyPolicy->getCameraPrivacyState()
+            == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_HELPFUL_APPS) ||
+            (mSensorPrivacyPolicy->getCameraPrivacyState()
+            == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_REQUIRED_APPS) ||
+            (mSensorPrivacyPolicy->getCameraPrivacyState() ==
+            SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_APPS)) {
+        if (hasPermissionsForCameraPrivacyAllowlist(callingPid, callingUid)) {
+            return false;
+        } else {
+            return true;
+        }
+    }
+    return false;
+}
+
 std::string CameraService::getPackageNameFromUid(int clientUid) {
     std::string packageName("");
 
@@ -2617,38 +2701,39 @@
             }
         }
 
-        // Automotive privileged client AID_AUTOMOTIVE_EVS using exterior system camera for use
-        // cases such as rear view and surround view cannot be disabled and are exempt from camera
-        // privacy policy.
-        if ((!isAutomotivePrivilegedClient(packageUid) ||
-                !isAutomotiveExteriorSystemCamera(cameraId))) {
+        bool isCameraPrivacyEnabled;
+        if (flags::camera_privacy_allowlist()) {
             // Set camera muting behavior.
-            bool isCameraPrivacyEnabled =
+            isCameraPrivacyEnabled = this->isCameraPrivacyEnabled(
+                    toString16(client->getPackageName()), cameraId, packagePid, packageUid);
+        } else {
+            isCameraPrivacyEnabled =
                     mSensorPrivacyPolicy->isCameraPrivacyEnabled();
-            if (client->supportsCameraMute()) {
-                client->setCameraMute(
-                        mOverrideCameraMuteMode || isCameraPrivacyEnabled);
-            } else if (isCameraPrivacyEnabled) {
-                // no camera mute supported, but privacy is on! => disconnect
-                ALOGI("Camera mute not supported for package: %s, camera id: %s",
-                        client->getPackageName().c_str(), cameraId.c_str());
-                // Do not hold mServiceLock while disconnecting clients, but
-                // retain the condition blocking other clients from connecting
-                // in mServiceLockWrapper if held.
-                mServiceLock.unlock();
-                // Clear caller identity temporarily so client disconnect PID
-                // checks work correctly
-                int64_t token = CameraThreadState::clearCallingIdentity();
-                // Note AppOp to trigger the "Unblock" dialog
-                client->noteAppOp();
-                client->disconnect();
-                CameraThreadState::restoreCallingIdentity(token);
-                // Reacquire mServiceLock
-                mServiceLock.lock();
+        }
 
-                return STATUS_ERROR_FMT(ERROR_DISABLED,
-                        "Camera \"%s\" disabled due to camera mute", cameraId.c_str());
-            }
+        if (client->supportsCameraMute()) {
+            client->setCameraMute(
+                    mOverrideCameraMuteMode || isCameraPrivacyEnabled);
+        } else if (isCameraPrivacyEnabled) {
+            // no camera mute supported, but privacy is on! => disconnect
+            ALOGI("Camera mute not supported for package: %s, camera id: %s",
+                    client->getPackageName().c_str(), cameraId.c_str());
+            // Do not hold mServiceLock while disconnecting clients, but
+            // retain the condition blocking other clients from connecting
+            // in mServiceLockWrapper if held.
+            mServiceLock.unlock();
+            // Clear caller identity temporarily so client disconnect PID
+            // checks work correctly
+            int64_t token = CameraThreadState::clearCallingIdentity();
+            // Note AppOp to trigger the "Unblock" dialog
+            client->noteAppOp();
+            client->disconnect();
+            CameraThreadState::restoreCallingIdentity(token);
+            // Reacquire mServiceLock
+            mServiceLock.lock();
+
+            return STATUS_ERROR_FMT(ERROR_DISABLED,
+                    "Camera \"%s\" disabled due to camera mute", cameraId.c_str());
         }
 
         if (shimUpdateOnly) {
@@ -4169,8 +4254,15 @@
         // return MODE_IGNORED. Do not treat such case as error.
         bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid,
                 mClientPackageName);
-        bool isCameraPrivacyEnabled =
+
+        bool isCameraPrivacyEnabled;
+        if (flags::camera_privacy_allowlist()) {
+            isCameraPrivacyEnabled = sCameraService->isCameraPrivacyEnabled(
+                    toString16(mClientPackageName), std::string(), mClientPid, mClientUid);
+        } else {
+            isCameraPrivacyEnabled =
                 sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled();
+        }
         // We don't want to return EACCESS if the CameraPrivacy is enabled.
         // We prefer to successfully open the camera and perform camera muting
         // or blocking in connectHelper as handleAppOpMode can be called before the
@@ -4357,11 +4449,19 @@
         block();
     } else if (res == AppOpsManager::MODE_IGNORED) {
         bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid, mClientPackageName);
-        bool isCameraPrivacyEnabled =
+
+        bool isCameraPrivacyEnabled;
+        if (flags::camera_privacy_allowlist()) {
+            isCameraPrivacyEnabled = sCameraService->isCameraPrivacyEnabled(
+                    toString16(mClientPackageName), std::string(), mClientPid, mClientUid);
+        } else {
+            isCameraPrivacyEnabled =
                 sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled();
-        ALOGI("Camera %s: Access for \"%s\" has been restricted, isUidTrusted %d, isUidActive %d",
-                mCameraIdStr.c_str(), mClientPackageName.c_str(),
-                mUidIsTrusted, isUidActive);
+        }
+
+        ALOGI("Camera %s: Access for \"%s\" has been restricted, isUidTrusted %d, isUidActive %d,"
+                " isCameraPrivacyEnabled %d", mCameraIdStr.c_str(), mClientPackageName.c_str(),
+                mUidIsTrusted, isUidActive, isCameraPrivacyEnabled);
         // If the calling Uid is trusted (a native service), or the client Uid is active (WAR for
         // b/175320666), the AppOpsManager could return MODE_IGNORED. Do not treat such cases as
         // error.
@@ -4741,7 +4841,15 @@
     }
     hasCameraPrivacyFeature(); // Called so the result is cached
     mSpm.addSensorPrivacyListener(this);
+    if (isAutomotiveDevice()) {
+        mSpm.addToggleSensorPrivacyListener(this);
+    }
     mSensorPrivacyEnabled = mSpm.isSensorPrivacyEnabled();
+    if (flags::camera_privacy_allowlist()) {
+        mCameraPrivacyState = mSpm.getToggleSensorPrivacyState(
+                SensorPrivacyManager::TOGGLE_TYPE_SOFTWARE,
+                SensorPrivacyManager::TOGGLE_SENSOR_CAMERA);
+    }
     status_t res = mSpm.linkToDeath(this);
     if (res == OK) {
         mRegistered = true;
@@ -4773,6 +4881,9 @@
 void CameraService::SensorPrivacyPolicy::unregisterSelf() {
     Mutex::Autolock _l(mSensorPrivacyLock);
     mSpm.removeSensorPrivacyListener(this);
+    if (isAutomotiveDevice()) {
+        mSpm.removeToggleSensorPrivacyListener(this);
+    }
     mSpm.unlinkToDeath(this);
     mRegistered = false;
     ALOGV("SensorPrivacyPolicy: Unregistered with SensorPrivacyManager");
@@ -4787,6 +4898,15 @@
     return mSensorPrivacyEnabled;
 }
 
+int CameraService::SensorPrivacyPolicy::getCameraPrivacyState() {
+    if (!mRegistered) {
+        registerWithSensorPrivacyManager();
+    }
+
+    Mutex::Autolock _l(mSensorPrivacyLock);
+    return mCameraPrivacyState;
+}
+
 bool CameraService::SensorPrivacyPolicy::isCameraPrivacyEnabled() {
     if (!hasCameraPrivacyFeature()) {
         return false;
@@ -4794,18 +4914,53 @@
     return mSpm.isToggleSensorPrivacyEnabled(SensorPrivacyManager::TOGGLE_SENSOR_CAMERA);
 }
 
+bool CameraService::SensorPrivacyPolicy::isCameraPrivacyEnabled(const String16& packageName) {
+    if (!hasCameraPrivacyFeature()) {
+        return false;
+    }
+    return mSpm.isCameraPrivacyEnabled(packageName);
+}
+
 binder::Status CameraService::SensorPrivacyPolicy::onSensorPrivacyChanged(
-    int toggleType __unused, int sensor __unused, bool enabled) {
+    int toggleType, int sensor, bool enabled) {
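+    // Only the global notification (unknown toggle type and sensor) updates
+    // mSensorPrivacyEnabled; per-sensor camera state is handled in
+    // onSensorPrivacyStateChanged() below.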
+    if ((toggleType == SensorPrivacyManager::TOGGLE_TYPE_UNKNOWN)
+            && (sensor == SensorPrivacyManager::TOGGLE_SENSOR_UNKNOWN)) {
+        {
+            Mutex::Autolock _l(mSensorPrivacyLock);
+            mSensorPrivacyEnabled = enabled;
+        }
+        // if sensor privacy is enabled then block all clients from accessing the camera
+        if (enabled) {
+            sp<CameraService> service = mService.promote();
+            if (service != nullptr) {
+                service->blockAllClients();
+            }
+        }
+    }
+    return binder::Status::ok();
+}
+
+binder::Status CameraService::SensorPrivacyPolicy::onSensorPrivacyStateChanged(
+    int, int sensor, int state) {
+    if (!flags::camera_privacy_allowlist()
+            || (sensor != SensorPrivacyManager::TOGGLE_SENSOR_CAMERA)) {
+        return binder::Status::ok();
+    }
     {
         Mutex::Autolock _l(mSensorPrivacyLock);
-        mSensorPrivacyEnabled = enabled;
+        mCameraPrivacyState = state;
+    }
+    sp<CameraService> service = mService.promote();
+    if (!service) {
+        return binder::Status::ok();
     }
     // if sensor privacy is enabled then block all clients from accessing the camera
-    if (enabled) {
-        sp<CameraService> service = mService.promote();
-        if (service != nullptr) {
-            service->blockAllClients();
-        }
+    if (state == SensorPrivacyManager::ENABLED) {
+        service->blockAllClients();
+    } else if ((state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_APPS)
+            || (state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_HELPFUL_APPS)
+            || (state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_REQUIRED_APPS)) {
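+        // In the automotive driver-assistance states, only block clients whose packages have
+        // camera privacy enabled rather than blocking every active client.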
+        service->blockPrivacyEnabledClients();
     }
     return binder::Status::ok();
 }
@@ -5676,6 +5831,23 @@
     }
 }
 
+void CameraService::blockPrivacyEnabledClients() {
+    const auto clients = mActiveClientManager.getAll();
+    for (auto& current : clients) {
+        if (current != nullptr) {
+            const auto basicClient = current->getValue();
+            if (basicClient.get() != nullptr) {
+                std::string pkgName = basicClient->getPackageName();
+                bool cameraPrivacyEnabled =
+                        mSensorPrivacyPolicy->isCameraPrivacyEnabled(toString16(pkgName));
+                if (cameraPrivacyEnabled) {
+                    basicClient->block();
+                }
+            }
+        }
+    }
+}
+
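
blockPrivacyEnabledClients() walks the active client list and blocks only those clients whose package reports camera privacy as enabled, leaving allowlisted packages running. An illustrative model of that loop (not part of the patch), with the per-package check injected as a callable instead of SensorPrivacyManager:

```cpp
// Simplified model of blockPrivacyEnabledClients(); all names here are hypothetical.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Client {
    std::string packageName;
    bool blocked = false;
    void block() { blocked = true; }
};

void blockPrivacyEnabledClients(std::vector<Client>& activeClients,
        const std::function<bool(const std::string&)>& isCameraPrivacyEnabledForPackage) {
    for (Client& c : activeClients) {
        // Only clients whose package has the camera privacy toggle enabled are blocked;
        // allowlisted (e.g. driver-assistance) packages keep streaming.
        if (isCameraPrivacyEnabledForPackage(c.packageName)) {
            c.block();
        }
    }
}

int main() {
    std::vector<Client> clients = {{"com.example.social"}, {"com.example.driverassist"}};
    blockPrivacyEnabledClients(clients, [](const std::string& pkg) {
        return pkg != "com.example.driverassist";  // pretend allowlist
    });
    for (const auto& c : clients) {
        std::cout << c.packageName << (c.blocked ? " blocked\n" : " running\n");
    }
    return 0;
}
```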
 // NOTE: This is a remote API - make sure all args are validated
 status_t CameraService::shellCommand(int in, int out, int err, const Vector<String16>& args) {
     if (!checkCallingPermission(toString16(sManageCameraPermission), nullptr, nullptr)) {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 1487013..8822cd3 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -248,6 +248,11 @@
             /*out*/
             bool* supported);
 
+    virtual binder::Status getSessionCharacteristics(
+            const std::string& cameraId, int targetSdkVersion, bool overrideToPortrait,
+            const SessionConfiguration& sessionConfiguration,
+            /*out*/ CameraMetadata* outMetadata);
+
     // Extra permissions checks
     virtual status_t    onTransact(uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags);
@@ -681,6 +686,9 @@
             int callingUid) const;
 
     bool hasCameraPermissions() const;
+
+    bool hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const;
+
    /**
      * Typesafe version of device status, containing both the HAL-layer and the service interface-
      * layer values.
@@ -868,16 +876,20 @@
             public virtual IServiceManager::LocalRegistrationCallback {
         public:
             explicit SensorPrivacyPolicy(wp<CameraService> service)
-                    : mService(service), mSensorPrivacyEnabled(false), mRegistered(false) {}
+                    : mService(service), mSensorPrivacyEnabled(false),
+                    mCameraPrivacyState(SensorPrivacyManager::DISABLED), mRegistered(false) {}
 
             void registerSelf();
             void unregisterSelf();
 
             bool isSensorPrivacyEnabled();
             bool isCameraPrivacyEnabled();
+            int getCameraPrivacyState();
+            bool isCameraPrivacyEnabled(const String16& packageName);
 
             binder::Status onSensorPrivacyChanged(int toggleType, int sensor,
                                                   bool enabled);
+            binder::Status onSensorPrivacyStateChanged(int toggleType, int sensor, int state);
 
             // Implementation of IServiceManager::LocalRegistrationCallback
             virtual void onServiceRegistration(const String16& name,
@@ -890,6 +902,7 @@
             wp<CameraService> mService;
             Mutex mSensorPrivacyLock;
             bool mSensorPrivacyEnabled;
+            int mCameraPrivacyState;
             bool mRegistered;
 
             bool hasCameraPrivacyFeature();
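
The new getCameraPrivacyState() accessor and mCameraPrivacyState member follow a register-lazily-then-read-under-lock pattern: callbacks update the cached state under mSensorPrivacyLock, and readers register with the manager on first use. A minimal sketch of that pattern (not part of the patch), with a hypothetical registration stub in place of SensorPrivacyManager:

```cpp
// Illustrative sketch of the lazy-register, locked-read caching pattern.
#include <mutex>

class PrivacyStateCache {
  public:
    int get() {
        if (!mRegistered) {
            registerWithPrivacyService();  // hypothetical: would subscribe for callbacks
        }
        std::lock_guard<std::mutex> lock(mLock);
        return mState;
    }
    void onStateChanged(int newState) {   // called from the privacy-change callback
        std::lock_guard<std::mutex> lock(mLock);
        mState = newState;
    }
  private:
    void registerWithPrivacyService() { mRegistered = true; }
    std::mutex mLock;
    int mState = 0;      // e.g. DISABLED
    bool mRegistered = false;
};

int main() {
    PrivacyStateCache cache;
    cache.onStateChanged(2);  // e.g. ENABLED
    return cache.get() == 2 ? 0 : 1;
}
```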
@@ -926,6 +939,9 @@
             const std::string& clientName, /*inout*/int& clientUid, /*inout*/int& clientPid,
             /*out*/int& originalClientPid) const;
 
+    bool isCameraPrivacyEnabled(const String16& packageName, const std::string& cameraId,
+            int clientPid, int clientUid);
+
     // Handle active client evictions, and update service state.
     // Only call with with mServiceLock held.
     status_t handleEvictionsLocked(const std::string& cameraId, int clientPid,
@@ -1385,6 +1401,9 @@
     // Blocks all active clients.
     void blockAllClients();
 
+    // Blocks active clients whose packages have camera privacy enabled.
+    void blockPrivacyEnabledClients();
+
     // Overrides the UID state as if it is idle
     status_t handleSetUidState(const Vector<String16>& args, int err);
 
diff --git a/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp b/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp
index 954cb8b..9e6a925 100644
--- a/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp
@@ -20,6 +20,7 @@
 #include <aidl/AidlUtils.h>
 #include <aidl/android/frameworks/cameraservice/device/CaptureMetadataInfo.h>
 #include <android-base/properties.h>
+#include <utils/Utils.h>
 
 namespace android::frameworks::cameraservice::device::implementation {
 
@@ -56,7 +57,7 @@
 AidlCameraDeviceUser::AidlCameraDeviceUser(const sp<UICameraDeviceUser>& deviceRemote):
       mDeviceRemote(deviceRemote) {
     mInitSuccess = initDevice();
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 
 bool AidlCameraDeviceUser::initDevice() {
diff --git a/services/camera/libcameraservice/aidl/AidlCameraService.cpp b/services/camera/libcameraservice/aidl/AidlCameraService.cpp
index 8cd7d1f..79dbfed 100644
--- a/services/camera/libcameraservice/aidl/AidlCameraService.cpp
+++ b/services/camera/libcameraservice/aidl/AidlCameraService.cpp
@@ -27,6 +27,7 @@
 #include <android/binder_manager.h>
 #include <binder/Status.h>
 #include <hidl/HidlTransportSupport.h>
+#include <utils/Utils.h>
 
 namespace android::frameworks::cameraservice::service::implementation {
 
@@ -79,7 +80,7 @@
 
 AidlCameraService::AidlCameraService(::android::CameraService* cameraService):
       mCameraService(cameraService) {
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 ScopedAStatus AidlCameraService::getCameraCharacteristics(const std::string& in_cameraId,
                                                           SCameraMetadata* _aidl_return) {
diff --git a/services/camera/libcameraservice/aidl/AidlUtils.cpp b/services/camera/libcameraservice/aidl/AidlUtils.cpp
index f5d68eb..f2d1414 100644
--- a/services/camera/libcameraservice/aidl/AidlUtils.cpp
+++ b/services/camera/libcameraservice/aidl/AidlUtils.cpp
@@ -310,8 +310,8 @@
 
 status_t filterVndkKeys(int vndkVersion, CameraMetadata &metadata, bool isStatic) {
     if (vndkVersion == __ANDROID_API_FUTURE__) {
-        // VNDK version in ro.vndk.version is a version code-name that
-        // corresponds to the current version.
+        // VNDK version derived from ro.board.api_level is a version code-name that
+        // corresponds to the current SDK version.
         return OK;
     }
     const auto &apiLevelToKeys =
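
filterVndkKeys() continues to treat __ANDROID_API_FUTURE__ as "current build, skip filtering", but callers now obtain the version through getVNDKVersionFromProp() (derived from ro.board.api_level) instead of reading ro.vndk.version directly. An illustrative sketch of that read-with-fallback behaviour (not part of the patch); the property store is mocked with environment variables and the helper name is hypothetical:

```cpp
// Sketch only: models "read an API-level property, fall back to a future sentinel".
#include <cstdlib>
#include <iostream>

constexpr int kApiFuture = 10000;  // stand-in for __ANDROID_API_FUTURE__

int getIntPropertyOrDefault(const char* name, int defaultValue) {
    const char* value = std::getenv(name);  // environment used as a mock property store
    if (value == nullptr || *value == '\0') return defaultValue;
    return std::atoi(value);
}

int main() {
    int vndkVersion = getIntPropertyOrDefault("RO_BOARD_API_LEVEL", kApiFuture);
    if (vndkVersion == kApiFuture) {
        std::cout << "Unreleased build: skip VNDK key filtering\n";
    } else {
        std::cout << "Filter keys newer than API level " << vndkVersion << "\n";
    }
    return 0;
}
```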
diff --git a/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h b/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h
index e403b97..7965474 100644
--- a/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h
+++ b/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h
@@ -78,6 +78,7 @@
           ANDROID_CONTROL_AUTOFRAMING_AVAILABLE,
           ANDROID_CONTROL_AVAILABLE_SETTINGS_OVERRIDES,
           ANDROID_CONTROL_LOW_LIGHT_BOOST_INFO_LUMINANCE_RANGE,
+          ANDROID_EFV_PADDING_ZOOM_FACTOR_RANGE,
           ANDROID_FLASH_SINGLE_STRENGTH_DEFAULT_LEVEL,
           ANDROID_FLASH_SINGLE_STRENGTH_MAX_LEVEL,
           ANDROID_FLASH_TORCH_STRENGTH_DEFAULT_LEVEL,
@@ -112,6 +113,15 @@
           ANDROID_CONTROL_LOW_LIGHT_BOOST_STATE,
           ANDROID_CONTROL_SETTINGS_OVERRIDE,
           ANDROID_CONTROL_SETTINGS_OVERRIDING_FRAME_NUMBER,
+          ANDROID_EFV_AUTO_ZOOM,
+          ANDROID_EFV_AUTO_ZOOM_PADDING_REGION,
+          ANDROID_EFV_MAX_PADDING_ZOOM_FACTOR,
+          ANDROID_EFV_PADDING_REGION,
+          ANDROID_EFV_PADDING_ZOOM_FACTOR,
+          ANDROID_EFV_ROTATE_VIEWPORT,
+          ANDROID_EFV_STABILIZATION_MODE,
+          ANDROID_EFV_TARGET_COORDINATES,
+          ANDROID_EFV_TRANSLATE_VIEWPORT,
           ANDROID_EXTENSION_CURRENT_TYPE,
           ANDROID_EXTENSION_STRENGTH,
           ANDROID_FLASH_STRENGTH_LEVEL,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 3488629..508d487 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -777,60 +777,6 @@
     return res;
 }
 
-binder::Status CameraDeviceClient::getSessionCharacteristics(
-        const SessionConfiguration& sessionConfiguration,
-        /*out*/
-        hardware::camera2::impl::CameraMetadataNative* sessionCharacteristics) {
-    ATRACE_CALL();
-    binder::Status res;
-    status_t ret = OK;
-    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) {
-        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
-    }
-
-    auto operatingMode = sessionConfiguration.getOperatingMode();
-    res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
-            mCameraIdStr);
-    if (!res.isOk()) {
-        return res;
-    }
-
-    camera3::metadataGetter getMetadata = [this](const std::string &id,
-            bool /*overrideForPerfClass*/) {
-          return mDevice->infoPhysical(id);};
-    ret = mProviderManager->getSessionCharacteristics(mCameraIdStr.c_str(),
-            sessionConfiguration, mOverrideForPerfClass, getMetadata,
-            sessionCharacteristics);
-
-    switch (ret) {
-        case OK:
-            // Expected, do nothing.
-            break;
-        case INVALID_OPERATION: {
-                std::string msg = fmt::sprintf(
-                        "Camera %s: Session characteristics query not supported!",
-                        mCameraIdStr.c_str());
-                ALOGD("%s: %s", __FUNCTION__, msg.c_str());
-                res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.c_str());
-            }
-
-            break;
-        default: {
-                std::string msg = fmt::sprintf( "Camera %s: Error: %s (%d)", mCameraIdStr.c_str(),
-                        strerror(-ret), ret);
-                ALOGE("%s: %s", __FUNCTION__, msg.c_str());
-                res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                        msg.c_str());
-            }
-    }
-
-    return res;
-}
-
 binder::Status CameraDeviceClient::deleteStream(int streamId) {
     ATRACE_CALL();
     ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index c2f7f56..b2c9626 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -109,11 +109,6 @@
             /*out*/
             bool* streamStatus) override;
 
-    virtual binder::Status getSessionCharacteristics(
-            const SessionConfiguration& sessionConfiguration,
-            /*out*/
-            hardware::camera2::impl::CameraMetadataNative* sessionCharacteristics) override;
-
     // Returns -EBUSY if device is not idle or in error state
     virtual binder::Status deleteStream(int streamId) override;
 
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 1ba3de4..15e2755 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -33,6 +33,7 @@
 #include <future>
 #include <inttypes.h>
 #include <android_companion_virtualdevice_flags.h>
+#include <android_companion_virtualdevice_build_flags.h>
 #include <android/binder_manager.h>
 #include <android/hidl/manager/1.2/IServiceManager.h>
 #include <hidl/ServiceManagement.h>
@@ -139,7 +140,7 @@
 }
 
 std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-CameraProviderManager::AidlServiceInteractionProxyImpl::getAidlService(
+CameraProviderManager::AidlServiceInteractionProxyImpl::getService(
         const std::string& serviceName) {
     using aidl::android::hardware::camera::provider::ICameraProvider;
 
@@ -147,19 +148,35 @@
     if (flags::lazy_aidl_wait_for_service()) {
         binder = AServiceManager_waitForService(serviceName.c_str());
     } else {
-        binder = AServiceManager_getService(serviceName.c_str());
+        binder = AServiceManager_checkService(serviceName.c_str());
     }
 
     if (binder == nullptr) {
-        ALOGD("%s: AIDL Camera provider HAL '%s' is not actually available", __FUNCTION__,
-              serviceName.c_str());
+        ALOGE("%s: AIDL Camera provider HAL '%s' is not actually available, despite waiting "
+              "indefinitely?", __FUNCTION__, serviceName.c_str());
         return nullptr;
     }
     std::shared_ptr<ICameraProvider> interface =
             ICameraProvider::fromBinder(ndk::SpAIBinder(binder));
 
     return interface;
-};
+}
+
+std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+CameraProviderManager::AidlServiceInteractionProxyImpl::tryGetService(
+        const std::string& serviceName) {
+    using aidl::android::hardware::camera::provider::ICameraProvider;
+
+    std::shared_ptr<ICameraProvider> interface = ICameraProvider::fromBinder(
+                    ndk::SpAIBinder(AServiceManager_checkService(serviceName.c_str())));
+    if (interface == nullptr) {
+        ALOGD("%s: AIDL Camera provider HAL '%s' is not actually available", __FUNCTION__,
+              serviceName.c_str());
+        return nullptr;
+    }
+
+    return interface;
+}
 
 static std::string getFullAidlProviderName(const std::string instance) {
     std::string aidlHalServiceDescriptor =
@@ -442,19 +459,31 @@
     return OK;
 }
 
-status_t CameraProviderManager::getSessionCharacteristics(const std::string& id,
-        const SessionConfiguration &configuration, bool overrideForPerfClass,
-        metadataGetter getMetadata,
-        CameraMetadata* sessionCharacteristics /*out*/) const {
+status_t CameraProviderManager::getSessionCharacteristics(
+        const std::string& id, const SessionConfiguration& configuration, bool overrideForPerfClass,
+        bool overrideToPortrait, CameraMetadata* sessionCharacteristics /*out*/) const {
     if (!flags::feature_combination_query()) {
         return INVALID_OPERATION;
     }
+
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     auto deviceInfo = findDeviceInfoLocked(id);
     if (deviceInfo == nullptr) {
         return NAME_NOT_FOUND;
     }
 
+    metadataGetter getMetadata = [this, overrideToPortrait](const std::string& id,
+                                                            bool overrideForPerfClass) {
+        CameraMetadata metadata;
+        status_t ret = this->getCameraCharacteristicsLocked(id, overrideForPerfClass, &metadata,
+                                                            overrideToPortrait);
+        if (ret != OK) {
+            ALOGE("%s: Could not get CameraCharacteristics for device %s", __FUNCTION__,
+                  id.c_str());
+        }
+        return metadata;
+    };
+
     return deviceInfo->getSessionCharacteristics(configuration,
             overrideForPerfClass, getMetadata, sessionCharacteristics);
 }
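
CameraProviderManager now builds the metadataGetter itself as a lambda that captures overrideToPortrait, so callers no longer supply one. A standalone sketch (not part of the patch) of this capture-one-argument-and-narrow-the-signature pattern; CameraMetadata is a placeholder type here:

```cpp
// Illustrative sketch of building a metadata getter as a capturing lambda.
#include <functional>
#include <iostream>
#include <string>

struct CameraMetadata { std::string dump; };

using MetadataGetter =
        std::function<CameraMetadata(const std::string& id, bool overrideForPerfClass)>;

CameraMetadata getCharacteristics(const std::string& id, bool overrideForPerfClass,
                                  bool overrideToPortrait) {
    // Stand-in for getCameraCharacteristicsLocked().
    return {"id=" + id + (overrideForPerfClass ? " perf" : "") +
            (overrideToPortrait ? " portrait" : "")};
}

int main() {
    bool overrideToPortrait = true;
    // The lambda fixes overrideToPortrait once, so downstream code only deals with
    // (id, overrideForPerfClass), matching the metadataGetter signature.
    MetadataGetter getMetadata = [overrideToPortrait](const std::string& id,
                                                      bool overrideForPerfClass) {
        return getCharacteristics(id, overrideForPerfClass, overrideToPortrait);
    };
    std::cout << getMetadata("0", /*overrideForPerfClass=*/false).dump << "\n";
    return 0;
}
```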
@@ -1807,18 +1836,13 @@
     auto& c = mCameraCharacteristics;
 
     auto entry = c.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
-    if (entry.count != 0) {
-        ALOGE("%s: CameraCharacteristics must not contain ANDROID_SENSOR_READOUT_TIMESTAMP!",
-                __FUNCTION__);
+    if (entry.count == 0) {
+        uint8_t defaultReadoutTimestamp = readoutTimestampSupported ?
+                                          ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE :
+                                          ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
+        res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &defaultReadoutTimestamp, 1);
     }
 
-    uint8_t readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
-    if (readoutTimestampSupported) {
-        readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
-    }
-
-    res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &readoutTimestamp, 1);
-
     return res;
 }
 
@@ -2101,8 +2125,14 @@
         const std::string& providerName, const sp<ProviderInfo>& providerInfo) {
     using aidl::android::hardware::camera::provider::ICameraProvider;
 
-    std::shared_ptr<ICameraProvider> interface =
-            mAidlServiceProxy->getAidlService(providerName.c_str());
+    std::shared_ptr<ICameraProvider> interface;
+    if (flags::delay_lazy_hal_instantiation()) {
+        // Only get remote instance if already running. Lazy Providers will be
+        // woken up later.
+        interface = mAidlServiceProxy->tryGetService(providerName);
+    } else {
+        interface = mAidlServiceProxy->getService(providerName);
+    }
 
     if (interface == nullptr) {
         ALOGW("%s: AIDL Camera provider HAL '%s' is not actually available", __FUNCTION__,
@@ -3249,7 +3279,8 @@
 }
 
 bool CameraProviderManager::isVirtualCameraHalEnabled() {
-    return vd_flags::virtual_camera_service_discovery();
+    return vd_flags::virtual_camera_service_discovery() &&
+           vd_flags::virtual_camera_service_build_flag();
 }
 
 } // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 53a2102..5ff3fcd 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -178,9 +178,15 @@
     // Proxy to inject fake services in test.
     class AidlServiceInteractionProxy {
       public:
-        // Returns the Aidl service with the given serviceName
+        // Returns the AIDL service with the given serviceName. Will wait indefinitely
+        // for the service to come up if it is not already running.
         virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-        getAidlService(const std::string& serviceName) = 0;
+        getService(const std::string& serviceName) = 0;
+
+        // Attempts to get an already running AIDL service of the given serviceName.
+        // Returns nullptr immediately if the service is not running.
+        virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+        tryGetService(const std::string& serviceName) = 0;
 
         virtual ~AidlServiceInteractionProxy() = default;
     };
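
The proxy now exposes two lookup modes: getService() waits for the provider to come up, while tryGetService() returns nullptr immediately if it is not running, and addAidlProviders() chooses between them based on the delay_lazy_hal_instantiation flag. A sketch (not part of the patch) of how a test double could cover both paths; FakeProxy and FakeProvider are hypothetical test scaffolding:

```cpp
// Illustrative test-double sketch for the two provider lookup modes.
#include <iostream>
#include <memory>
#include <string>

struct FakeProvider {};  // stand-in for ICameraProvider

class ProviderLookup {   // stand-in for AidlServiceInteractionProxy
  public:
    virtual ~ProviderLookup() = default;
    // Waits (possibly indefinitely) for the service.
    virtual std::shared_ptr<FakeProvider> getService(const std::string& name) = 0;
    // Returns immediately; nullptr if the service is not already running.
    virtual std::shared_ptr<FakeProvider> tryGetService(const std::string& name) = 0;
};

class FakeProxy : public ProviderLookup {
  public:
    explicit FakeProxy(bool running) : mRunning(running) {}
    std::shared_ptr<FakeProvider> getService(const std::string&) override {
        return std::make_shared<FakeProvider>();  // pretend the wait always succeeds
    }
    std::shared_ptr<FakeProvider> tryGetService(const std::string&) override {
        return mRunning ? std::make_shared<FakeProvider>() : nullptr;
    }
  private:
    bool mRunning;
};

int main() {
    FakeProxy notRunning(/*running=*/false);
    bool delayLazyInstantiation = true;  // models the delay_lazy_hal_instantiation flag
    auto provider = delayLazyInstantiation
            ? notRunning.tryGetService("android.hardware.camera.provider.ICameraProvider/internal/0")
            : notRunning.getService("android.hardware.camera.provider.ICameraProvider/internal/0");
    std::cout << (provider ? "got provider\n" : "lazy provider not started yet\n");
    return 0;
}
```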
@@ -190,7 +196,10 @@
     class AidlServiceInteractionProxyImpl : public AidlServiceInteractionProxy {
       public:
         virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-        getAidlService(const std::string& serviceName) override;
+        getService(const std::string& serviceName) override;
+
+        virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+        tryGetService(const std::string& serviceName) override;
     };
 
     /**
@@ -321,7 +330,8 @@
      */
      status_t getSessionCharacteristics(const std::string& id,
             const SessionConfiguration &configuration,
-            bool overrideForPerfClass, camera3::metadataGetter getMetadata,
+            bool overrideForPerfClass,
+            bool overrideToPortrait,
             CameraMetadata* sessionCharacteristics /*out*/) const;
 
     /**
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index d773af3..a721d28 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -275,54 +275,58 @@
     if (mSavedInterface != nullptr) {
         return mSavedInterface;
     }
+
     if (!kEnableLazyHal) {
         ALOGE("Bad provider state! Should not be here on a non-lazy HAL!");
         return nullptr;
     }
 
     auto interface = mActiveInterface.lock();
-    if (interface == nullptr) {
-        // Try to get service without starting
-        interface =
-                    ICameraProvider::fromBinder(
-                            ndk::SpAIBinder(AServiceManager_checkService(mProviderName.c_str())));
-        if (interface == nullptr) {
-            ALOGV("Camera provider actually needs restart, calling getService(%s)",
-                  mProviderName.c_str());
-            interface = mManager->mAidlServiceProxy->getAidlService(mProviderName.c_str());
-
-            if (interface == nullptr) {
-                ALOGD("%s: %s service not started", __FUNCTION__, mProviderName.c_str());
-                return nullptr;
-            }
-
-            // Set all devices as ENUMERATING, provider should update status
-            // to PRESENT after initializing.
-            // This avoids failing getCameraDeviceInterface_V3_x before devices
-            // are ready.
-            for (auto& device : mDevices) {
-              device->mIsDeviceAvailable = false;
-            }
-
-            interface->setCallback(mCallbacks);
-            auto link = AIBinder_linkToDeath(interface->asBinder().get(), mDeathRecipient.get(),
-                    this);
-            if (link != STATUS_OK) {
-                ALOGW("%s: Unable to link to provider '%s' death notifications",
-                        __FUNCTION__, mProviderName.c_str());
-                mManager->removeProvider(mProviderInstance);
-                return nullptr;
-            }
-
-            // Send current device state
-            interface->notifyDeviceStateChange(mDeviceState);
-        }
-        mActiveInterface = interface;
-    } else {
-        ALOGV("Camera provider (%s) already in use. Re-using instance.",
-              mProviderName.c_str());
+    if (interface != nullptr) {
+        ALOGV("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
+        return interface;
     }
 
+    // Try to get service without starting
+    interface = ICameraProvider::fromBinder(
+            ndk::SpAIBinder(AServiceManager_checkService(mProviderName.c_str())));
+    if (interface != nullptr) {
+        // Service is already running. Cache and return.
+        mActiveInterface = interface;
+        return interface;
+    }
+
+    ALOGV("Camera provider actually needs restart, calling getService(%s)", mProviderName.c_str());
+    interface = mManager->mAidlServiceProxy->getService(mProviderName);
+
+    if (interface == nullptr) {
+        ALOGE("%s: %s service not started", __FUNCTION__, mProviderName.c_str());
+        return nullptr;
+    }
+
+    // Set all devices as ENUMERATING, provider should update status
+    // to PRESENT after initializing.
+    // This avoids failing getCameraDeviceInterface_V3_x before devices
+    // are ready.
+    for (auto& device : mDevices) {
+      device->mIsDeviceAvailable = false;
+    }
+
+    interface->setCallback(mCallbacks);
+    auto link = AIBinder_linkToDeath(interface->asBinder().get(), mDeathRecipient.get(),
+            this);
+    if (link != STATUS_OK) {
+        ALOGW("%s: Unable to link to provider '%s' death notifications",
+                __FUNCTION__, mProviderName.c_str());
+        mManager->removeProvider(mProviderInstance);
+        return nullptr;
+    }
+
+    // Send current device state
+    interface->notifyDeviceStateChange(mDeviceState);
+    // Cache interface to return early for future calls.
+    mActiveInterface = interface;
+
     return interface;
 }
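
The refactored startProviderInterface() above follows a fixed sequence: reuse the cached interface, else pick up an already-running service, else start the lazy HAL, link a death recipient, push the current device state, and cache the result. A compact sketch of that sequence (not part of the patch) under simplified stand-in types; the binder/AIDL plumbing is elided:

```cpp
// Illustrative sketch of the lazy-provider startup sequence with guard clauses.
#include <iostream>
#include <memory>
#include <string>

struct Provider { std::string name; };

std::weak_ptr<Provider> gActive;  // cache of the live provider
std::shared_ptr<Provider> checkRunning(const std::string&) { return nullptr; }
std::shared_ptr<Provider> startAndWait(const std::string& n) {
    return std::make_shared<Provider>(Provider{n});
}
bool linkToDeath(const std::shared_ptr<Provider>&) { return true; }

std::shared_ptr<Provider> startProviderInterface(const std::string& name) {
    if (auto cached = gActive.lock()) return cached;  // already in use
    if (auto running = checkRunning(name)) {          // running but not yet cached
        gActive = running;
        return running;
    }
    auto started = startAndWait(name);                // actually (re)start the lazy HAL
    if (!started) return nullptr;
    if (!linkToDeath(started)) return nullptr;        // must observe provider death
    gActive = started;                                // cache for future calls
    return started;
}

int main() {
    auto p = startProviderInterface("internal/0");
    std::cout << (p ? "provider up: " + p->name : std::string("failed")) << "\n";
    return 0;
}
```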
 
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
index d2643c1..065f0c5 100644
--- a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
@@ -692,6 +692,14 @@
         mHasFlashUnit = false;
     }
 
+    if (flags::feature_combination_query()) {
+        res = addSessionConfigQueryVersionTag();
+        if (OK != res) {
+            ALOGE("%s: Unable to add sessionConfigurationQueryVersion tag: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+        }
+    }
+
     camera_metadata_entry entry =
             mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL);
     if (entry.count == 1) {
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 28b2d78..c0a0544 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -69,6 +69,7 @@
 #include "utils/SchedulingPolicyUtils.h"
 #include "utils/SessionConfigurationUtils.h"
 #include "utils/TraceHFR.h"
+#include "utils/Utils.h"
 
 #include <algorithm>
 #include <optional>
@@ -80,7 +81,7 @@
 
 namespace flags = com::android::internal::camera::flags;
 namespace android {
-namespace flags = com::android::internal::camera::flags;
+
 Camera3Device::Camera3Device(std::shared_ptr<CameraServiceProxyWrapper>& cameraServiceProxyWrapper,
         const std::string &id, bool overrideForPerfClass, bool overrideToPortrait,
         bool legacyClient):
@@ -145,17 +146,6 @@
     /** Register in-flight map to the status tracker */
     mInFlightStatusId = mStatusTracker->addComponent("InflightRequests");
 
-    if (mUseHalBufManager) {
-        res = mRequestBufferSM.initialize(mStatusTracker);
-        if (res != OK) {
-            SET_ERR_L("Unable to start request buffer state machine: %s (%d)",
-                    strerror(-res), res);
-            mInterface->close();
-            mStatusTracker.clear();
-            return res;
-        }
-    }
-
     /** Create buffer manager */
     mBufferManager = new Camera3BufferManager();
 
@@ -1622,7 +1612,9 @@
     mStatusWaiters++;
 
     bool signalPipelineDrain = false;
-    if (!active && mUseHalBufManager) {
+    if (!active &&
+            (mUseHalBufManager ||
+                    (flags::session_hal_buf_manager() && mHalBufManagedStreamIds.size() != 0))) {
         auto streamIds = mOutputStreams.getStreamIds();
         if (mStatus == STATUS_ACTIVE) {
             mRequestThread->signalPipelineDrain(streamIds);
@@ -2538,7 +2530,7 @@
     }
 
     config.streams = streams.editArray();
-    config.use_hal_buf_manager = mUseHalBufManager;
+    config.hal_buffer_managed_streams = mHalBufManagedStreamIds;
 
     // Do the HAL configuration; will potentially touch stream
     // max_buffers, usage, and priv fields, as well as data_space and format
@@ -2562,13 +2554,17 @@
                 strerror(-res), res);
         return res;
     }
+    mUseHalBufManager = config.use_hal_buf_manager;
     if (flags::session_hal_buf_manager()) {
-        bool prevSessionHalBufManager = mUseHalBufManager;
-        // It is possible that configureStreams() changed config.use_hal_buf_manager
-        mUseHalBufManager = config.use_hal_buf_manager;
-        if (prevSessionHalBufManager && !mUseHalBufManager) {
+        bool prevSessionHalBufManager = (mHalBufManagedStreamIds.size() != 0);
+        // It is possible that configureStreams() changed config.hal_buffer_managed_streams
+        mHalBufManagedStreamIds = config.hal_buffer_managed_streams;
+
+        bool thisSessionHalBufManager = mHalBufManagedStreamIds.size() != 0;
+
+        if (prevSessionHalBufManager && !thisSessionHalBufManager) {
             mRequestBufferSM.deInit();
-        } else if (!prevSessionHalBufManager && mUseHalBufManager) {
+        } else if (!prevSessionHalBufManager && thisSessionHalBufManager) {
             res = mRequestBufferSM.initialize(mStatusTracker);
             if (res != OK) {
                 SET_ERR_L("%s: Camera %s: RequestBuffer State machine couldn't be initialized!",
@@ -2576,7 +2572,7 @@
                 return res;
             }
         }
-        mRequestThread->setHalBufferManager(mUseHalBufManager);
+        mRequestThread->setHalBufferManagedStreams(mHalBufManagedStreamIds);
     }
     // Finish all stream configuration immediately.
     // TODO: Try to relax this later back to lazy completion, which should be
@@ -2904,7 +2900,8 @@
 
     FlushInflightReqStates states {
         mId, mInFlightLock, mInFlightMap, mUseHalBufManager,
-        listener, *this, *mInterface, *this, mSessionStatsBuilder};
+        mHalBufManagedStreamIds, listener, *this, *mInterface, *this,
+        mSessionStatsBuilder};
 
     camera3::flushInflightRequests(states);
 }
@@ -2969,6 +2966,11 @@
     return mBufferRecords.verifyBufferIds(streamId, bufIds);
 }
 
+bool Camera3Device::HalInterface::isHalBufferManagedStream(int32_t streamId) const {
+    return (mUseHalBufManager || (flags::session_hal_buf_manager() &&
+                                  contains(mHalBufManagedStreamIds, streamId)));
+}
+
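
isHalBufferManagedStream() treats a stream as HAL-managed either under the legacy global switch or, when the session_hal_buf_manager flag is on, when its id is in the per-session set. A standalone sketch of the same check (not part of the patch); contains() here is a local helper standing in for the one in utils/Utils.h:

```cpp
// Illustrative sketch of the per-stream HAL buffer management check.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>

template <typename Collection, typename Value>
bool contains(const Collection& c, const Value& v) {
    return std::find(c.begin(), c.end(), v) != c.end();
}

bool isHalBufferManagedStream(bool legacyGlobalHalBufManager,
                              bool sessionHalBufManagerFlag,
                              const std::set<int32_t>& halManagedStreamIds,
                              int32_t streamId) {
    return legacyGlobalHalBufManager ||
           (sessionHalBufManagerFlag && contains(halManagedStreamIds, streamId));
}

int main() {
    std::set<int32_t> managed = {0, 2};
    std::cout << std::boolalpha
              << isHalBufferManagedStream(false, true, managed, 2) << "\n"   // true
              << isHalBufferManagedStream(false, true, managed, 1) << "\n";  // false
    return 0;
}
```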
 status_t Camera3Device::HalInterface::popInflightBuffer(
         int32_t frameNumber, int32_t streamId,
         /*out*/ buffer_handle_t **buffer) {
@@ -3061,7 +3063,7 @@
         mOverrideToPortrait(overrideToPortrait),
         mSupportSettingsOverride(supportSettingsOverride) {
     mStatusId = statusTracker->addComponent("RequestThread");
-    mVndkVersion = property_get_int32("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 
 Camera3Device::RequestThread::~RequestThread() {}
@@ -3287,8 +3289,9 @@
     mDoPauseSignal.signal();
 }
 
-void Camera3Device::RequestThread::setHalBufferManager(bool enabled) {
-    mUseHalBufManager = enabled;
+void Camera3Device::RequestThread::setHalBufferManagedStreams(
+            const std::set<int32_t> &halBufferManagedStreams) {
+    mHalBufManagedStreamIds = halBufferManagedStreams;
 }
 
 status_t Camera3Device::RequestThread::waitUntilRequestProcessed(
@@ -3972,11 +3975,15 @@
         nsecs_t waitDuration = kBaseGetBufferWait + parent->getExpectedInFlightDuration();
 
         SurfaceMap uniqueSurfaceIdMap;
+        bool containsHalBufferManagedStream = false;
         for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
             sp<Camera3OutputStreamInterface> outputStream =
                     captureRequest->mOutputStreams.editItemAt(j);
             int streamId = outputStream->getId();
-
+            if (!containsHalBufferManagedStream) {
+                containsHalBufferManagedStream =
+                        contains(mHalBufManagedStreamIds, streamId);
+            }
             // Prepare video buffers for high speed recording on the first video request.
             if (mPrepareVideoStream && outputStream->isVideoStream()) {
                 // Only try to prepare video stream on the first video request.
@@ -4008,7 +4015,7 @@
                 uniqueSurfaceIdMap.insert({streamId, std::move(uniqueSurfaceIds)});
             }
 
-            if (mUseHalBufManager) {
+            if (parent->isHalBufferManagedStream(streamId)) {
                 if (outputStream->isAbandoned()) {
                     ALOGV("%s: stream %d is abandoned, skipping request", __FUNCTION__, streamId);
                     return TIMED_OUT;
@@ -4099,6 +4106,9 @@
                 isZslCapture = true;
             }
         }
+        bool passSurfaceMap =
+                mUseHalBufManager ||
+                        (flags::session_hal_buf_manager() && containsHalBufferManagedStream);
         auto expectedDurationInfo = calculateExpectedDurationRange(settings);
         res = parent->registerInFlight(halRequest->frame_number,
                 totalNumBuffers, captureRequest->mResultExtras,
@@ -4110,7 +4120,7 @@
                 requestedPhysicalCameras, isStillCapture, isZslCapture,
                 captureRequest->mRotateAndCropAuto, captureRequest->mAutoframingAuto,
                 mPrevCameraIdsWithZoom,
-                (mUseHalBufManager) ? uniqueSurfaceIdMap :
+                passSurfaceMap ? uniqueSurfaceIdMap :
                                       SurfaceMap{}, captureRequest->mRequestTimeNs);
         ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
                ", burstId = %" PRId32 ".",
@@ -4213,7 +4223,8 @@
 }
 
 void Camera3Device::RequestThread::signalPipelineDrain(const std::vector<int>& streamIds) {
-    if (!mUseHalBufManager) {
+    if (!mUseHalBufManager &&
+            (flags::session_hal_buf_manager() && mHalBufManagedStreamIds.size() == 0)) {
         ALOGE("%s called for camera device not supporting HAL buffer management", __FUNCTION__);
         return;
     }
@@ -4365,22 +4376,28 @@
             captureRequest->mInputStream->returnInputBuffer(captureRequest->mInputBuffer);
         }
 
-        // No output buffer can be returned when using HAL buffer manager
-        if (!mUseHalBufManager) {
-            for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
-                //Buffers that failed processing could still have
-                //valid acquire fence.
-                int acquireFence = (*outputBuffers)[i].acquire_fence;
-                if (0 <= acquireFence) {
-                    close(acquireFence);
-                    outputBuffers->editItemAt(i).acquire_fence = -1;
-                }
-                outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
-                        /*timestamp*/0, /*readoutTimestamp*/0,
-                        /*timestampIncreasing*/true, std::vector<size_t> (),
-                        captureRequest->mResultExtras.frameNumber);
+        for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
+            //Buffers that failed processing could still have
+            //valid acquire fence.
+            Camera3Stream *stream = Camera3Stream::cast((*outputBuffers)[i].stream);
+            int32_t streamId = stream->getId();
+            bool skipBufferForStream =
+                    mUseHalBufManager || (flags::session_hal_buf_manager() &&
+                            contains(mHalBufManagedStreamIds, streamId));
+            if (skipBufferForStream) {
+                // No output buffer can be returned when using HAL buffer manager for its stream
+                continue;
             }
+            int acquireFence = (*outputBuffers)[i].acquire_fence;
+            if (0 <= acquireFence) {
+                close(acquireFence);
+                outputBuffers->editItemAt(i).acquire_fence = -1;
+            }
+            outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
+            captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
+                    /*timestamp*/0, /*readoutTimestamp*/0,
+                    /*timestampIncreasing*/true, std::vector<size_t> (),
+                    captureRequest->mResultExtras.frameNumber);
         }
 
         if (sendRequestError) {
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e32a36f..498ef55 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -97,6 +97,10 @@
         return mInterface->getTransportType();
     }
 
+    bool isHalBufferManagedStream(int32_t streamId) const {
+        return mInterface->isHalBufferManagedStream(streamId);
+    };
+
     /**
      * CameraDeviceBase interface
      */
@@ -476,6 +480,9 @@
 
         /////////////////////////////////////////////////////////////////////
 
+        // Check if a stream is HAL buffer managed
+        bool isHalBufferManagedStream(int32_t streamId) const;
+
         // Get a vector of (frameNumber, streamId) pair of currently inflight
         // buffers
         void getInflightBufferKeys(std::vector<std::pair<int32_t, int32_t>>* out);
@@ -547,7 +554,9 @@
 
         uint32_t mNextStreamConfigCounter = 1;
 
+        // TODO: This can be removed after flags::session_hal_buf_manager is removed
         bool mUseHalBufManager = false;
+        std::set<int32_t > mHalBufManagedStreamIds;
         bool mIsReconfigurationQuerySupported;
 
         const bool mSupportOfflineProcessing;
@@ -948,11 +957,11 @@
         void     setPaused(bool paused);
 
         /**
-         * Set Hal buffer manager behavior
-         * @param enabled Whether HAL buffer manager is enabled for the current session.
+         * Set HAL buffer managed streams
+         * @param halBufferManagedStreams The streams for which HAL buffer management is enabled
          *
          */
-        void setHalBufferManager(bool enabled);
+        void setHalBufferManagedStreams(const std::set<int32_t> &halBufferManagedStreams);
 
         /**
          * Wait until thread processes the capture request with settings'
@@ -1203,6 +1212,7 @@
         std::map<int32_t, std::set<std::string>> mGroupIdPhysicalCameraMap;
 
         bool               mUseHalBufManager = false;
+        std::set<int32_t > mHalBufManagedStreamIds;
         const bool         mSupportCameraMute;
         const bool         mOverrideToPortrait;
         const bool         mSupportSettingsOverride;
@@ -1393,6 +1403,7 @@
 
     // Whether HAL request buffers through requestStreamBuffers API
     bool mUseHalBufManager = false;
+    std::set<int32_t > mHalBufManagedStreamIds;
     bool mSessionHalBufManager = false;
     // Lock to ensure requestStreamBuffers() callbacks are serialized
     std::mutex mRequestBufferInterfaceLock;
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
index 172b62a..1025061 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
@@ -58,6 +58,7 @@
         mTagMonitor(offlineStates.mTagMonitor),
         mVendorTagId(offlineStates.mVendorTagId),
         mUseHalBufManager(offlineStates.mUseHalBufManager),
+        mHalBufManagedStreamIds(offlineStates.mHalBufManagedStreamIds),
         mNeedFixupMonochromeTags(offlineStates.mNeedFixupMonochromeTags),
         mUsePartialResult(offlineStates.mUsePartialResult),
         mNumPartialResults(offlineStates.mNumPartialResults),
@@ -136,7 +137,7 @@
 
     FlushInflightReqStates states {
         mId, mOfflineReqsLock, mOfflineReqs, mUseHalBufManager,
-        listener, *this, mBufferRecords, *this, mSessionStatsBuilder};
+        mHalBufManagedStreamIds, listener, *this, mBufferRecords, *this, mSessionStatsBuilder};
 
     camera3::flushInflightRequests(states);
 
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.h b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
index b5fd486..1ef3921 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
@@ -51,7 +51,8 @@
 struct Camera3OfflineStates {
     Camera3OfflineStates(
             const TagMonitor& tagMonitor, const metadata_vendor_id_t vendorTagId,
-            const bool useHalBufManager, const bool needFixupMonochromeTags,
+            const bool useHalBufManager, const std::set<int32_t> &halBufferManagedStreamIds,
+            const bool needFixupMonochromeTags,
             const bool usePartialResult, const uint32_t numPartialResults,
             const int64_t lastCompletedRegularFN, const int64_t lastCompletedReprocessFN,
             const int64_t lastCompletedZslFN, const uint32_t nextResultFN,
@@ -64,7 +65,8 @@
             const std::unordered_map<std::string, camera3::RotateAndCropMapper>&
                 rotateAndCropMappers) :
             mTagMonitor(tagMonitor), mVendorTagId(vendorTagId),
-            mUseHalBufManager(useHalBufManager), mNeedFixupMonochromeTags(needFixupMonochromeTags),
+            mUseHalBufManager(useHalBufManager), mHalBufManagedStreamIds(halBufferManagedStreamIds),
+            mNeedFixupMonochromeTags(needFixupMonochromeTags),
             mUsePartialResult(usePartialResult), mNumPartialResults(numPartialResults),
             mLastCompletedRegularFrameNumber(lastCompletedRegularFN),
             mLastCompletedReprocessFrameNumber(lastCompletedReprocessFN),
@@ -85,6 +87,7 @@
     const metadata_vendor_id_t mVendorTagId;
 
     const bool mUseHalBufManager;
+    const std::set<int32_t > &mHalBufManagedStreamIds;
     const bool mNeedFixupMonochromeTags;
 
     const bool mUsePartialResult;
@@ -181,6 +184,7 @@
     const metadata_vendor_id_t mVendorTagId;
 
     const bool mUseHalBufManager;
+    const std::set<int32_t > &mHalBufManagedStreamIds;
     const bool mNeedFixupMonochromeTags;
 
     const bool mUsePartialResult;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 450f3dd..89e08a1 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -45,13 +45,17 @@
 #include <camera/CameraUtils.h>
 #include <camera/StringUtils.h>
 #include <camera_metadata_hidden.h>
+#include <com_android_internal_camera_flags.h>
 
 #include "device3/Camera3OutputUtils.h"
+#include "utils/SessionConfigurationUtils.h"
 
 #include "system/camera_metadata.h"
 
 using namespace android::camera3;
+using namespace android::camera3::SessionConfigurationUtils;
 using namespace android::hardware::camera;
+namespace flags = com::android::internal::camera::flags;
 
 namespace android {
 namespace camera3 {
@@ -495,7 +499,8 @@
     states.inflightIntf.onInflightEntryRemovedLocked(duration);
 }
 
-void removeInFlightRequestIfReadyLocked(CaptureOutputStates& states, int idx) {
+void removeInFlightRequestIfReadyLocked(CaptureOutputStates& states, int idx,
+        std::vector<BufferToReturn> *returnableBuffers) {
     InFlightRequestMap& inflightMap = states.inflightMap;
     const InFlightRequest &request = inflightMap.valueAt(idx);
     const uint32_t frameNumber = inflightMap.keyAt(idx);
@@ -533,11 +538,13 @@
         assert(request.requestStatus != OK ||
                request.pendingOutputBuffers.size() == 0);
 
-        returnOutputBuffers(
-            states.useHalBufManager, states.listener,
+        collectReturnableOutputBuffers(
+            states.useHalBufManager, states.halBufManagedStreamIds,
+            states.listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
             /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+            /*out*/ returnableBuffers,
             /*timestampIncreasing*/true,
             request.outputSurfaces, request.resultExtras,
             request.errorBufStrategy, request.transform);
@@ -632,6 +639,7 @@
     // in-flight request and they will be returned when the shutter timestamp
     // arrives. Update the in-flight status and remove the in-flight entry if
     // all result data and shutter timestamp have been received.
+    std::vector<BufferToReturn> returnableBuffers{};
     nsecs_t shutterTimestamp = 0;
     {
         std::lock_guard<std::mutex> l(states.inflightLock);
@@ -793,9 +801,11 @@
         request.pendingOutputBuffers.appendArray(result->output_buffers,
                 result->num_output_buffers);
         if (shutterTimestamp != 0) {
-            returnAndRemovePendingOutputBuffers(
-                states.useHalBufManager, states.listener,
-                request, states.sessionStatsBuilder);
+            collectAndRemovePendingOutputBuffers(
+                states.useHalBufManager, states.halBufManagedStreamIds,
+                states.listener,
+                request, states.sessionStatsBuilder,
+                /*out*/ &returnableBuffers);
         }
 
         if (result->result != NULL && !isPartialResult) {
@@ -820,9 +830,18 @@
                     request.physicalMetadatas);
             }
         }
-        removeInFlightRequestIfReadyLocked(states, idx);
+        removeInFlightRequestIfReadyLocked(states, idx, &returnableBuffers);
+        if (!flags::return_buffers_outside_locks()) {
+            finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+        }
     } // scope for states.inFlightLock
 
+    if (flags::return_buffers_outside_locks()) {
+        finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+    }
+
     if (result->input_buffer != NULL) {
         if (hasInputBufferInRequest) {
             Camera3Stream *stream =
@@ -843,16 +862,17 @@
     }
 }
 
-void returnOutputBuffers(
+void collectReturnableOutputBuffers(
         bool useHalBufManager,
+        const std::set<int32_t> &halBufferManagedStreams,
         sp<NotificationListener> listener,
         const camera_stream_buffer_t *outputBuffers, size_t numBuffers,
         nsecs_t timestamp, nsecs_t readoutTimestamp, bool requested,
         nsecs_t requestTimeNs, SessionStatsBuilder& sessionStatsBuilder,
+        /*out*/ std::vector<BufferToReturn> *returnableBuffers,
         bool timestampIncreasing, const SurfaceMap& outputSurfaces,
-        const CaptureResultExtras &inResultExtras,
+        const CaptureResultExtras &resultExtras,
         ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
-
     for (size_t i = 0; i < numBuffers; i++)
     {
         Camera3StreamInterface *stream = Camera3Stream::cast(outputBuffers[i].stream);
@@ -862,7 +882,7 @@
         if (outputBuffers[i].status == CAMERA_BUFFER_STATUS_ERROR &&
                 errorBufStrategy == ERROR_BUF_RETURN_NOTIFY) {
             if (listener != nullptr) {
-                CaptureResultExtras extras = inResultExtras;
+                CaptureResultExtras extras = resultExtras;
                 extras.errorStreamId = streamId;
                 listener->notifyError(
                         hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
@@ -871,7 +891,9 @@
         }
 
         if (outputBuffers[i].buffer == nullptr) {
-            if (!useHalBufManager) {
+            if (!useHalBufManager &&
+                    !(flags::session_hal_buf_manager() &&
+                            contains(halBufferManagedStreams, streamId))) {
                 // With HAL buffer management API, HAL sometimes will have to return buffers that
                 // has not got a output buffer handle filled yet. This is though illegal if HAL
                 // buffer management API is not being used.
@@ -885,22 +907,35 @@
         }
 
         const auto& it = outputSurfaces.find(streamId);
-        status_t res = OK;
 
         // Do not return the buffer if the buffer status is error, and the error
         // buffer strategy is CACHE.
         if (outputBuffers[i].status != CAMERA_BUFFER_STATUS_ERROR ||
                 errorBufStrategy != ERROR_BUF_CACHE) {
             if (it != outputSurfaces.end()) {
-                res = stream->returnBuffer(
+                returnableBuffers->emplace_back(stream,
                         outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
-                        it->second, inResultExtras.frameNumber, transform);
+                        it->second, resultExtras,
+                        transform, requested ? requestTimeNs : 0);
             } else {
-                res = stream->returnBuffer(
+                returnableBuffers->emplace_back(stream,
                         outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
-                        std::vector<size_t> (), inResultExtras.frameNumber, transform);
+                        std::vector<size_t> (), resultExtras,
+                        transform, requested ? requestTimeNs : 0 );
             }
         }
+    }
+}
+
+void finishReturningOutputBuffers(const std::vector<BufferToReturn> &returnableBuffers,
+        sp<NotificationListener> listener, SessionStatsBuilder& sessionStatsBuilder) {
+    for (auto& b : returnableBuffers) {
+        const int streamId = b.stream->getId();
+
+        status_t res = b.stream->returnBuffer(b.buffer, b.timestamp,
+                b.readoutTimestamp, b.timestampIncreasing,
+                b.surfaceIds, b.resultExtras.frameNumber, b.transform);
+
         // Note: stream may be deallocated at this point, if this buffer was
         // the last reference to it.
         bool dropped = false;
@@ -911,50 +946,55 @@
             ALOGE("Can't return buffer to its stream: %s (%d)", strerror(-res), res);
             dropped = true;
         } else {
-            if (outputBuffers[i].status == CAMERA_BUFFER_STATUS_ERROR || timestamp == 0) {
+            if (b.buffer.status == CAMERA_BUFFER_STATUS_ERROR || b.timestamp == 0) {
                 dropped = true;
             }
         }
-        if (requested) {
+        if (b.requestTimeNs > 0) {
             nsecs_t bufferTimeNs = systemTime();
-            int32_t captureLatencyMs = ns2ms(bufferTimeNs - requestTimeNs);
+            int32_t captureLatencyMs = ns2ms(bufferTimeNs - b.requestTimeNs);
             sessionStatsBuilder.incCounter(streamId, dropped, captureLatencyMs);
         }
 
         // Long processing consumers can cause returnBuffer timeout for shared stream
         // If that happens, cancel the buffer and send a buffer error to client
-        if (it != outputSurfaces.end() && res == TIMED_OUT &&
-                outputBuffers[i].status == CAMERA_BUFFER_STATUS_OK) {
+        if (b.surfaceIds.size() > 0 && res == TIMED_OUT &&
+                b.buffer.status == CAMERA_BUFFER_STATUS_OK) {
             // cancel the buffer
-            camera_stream_buffer_t sb = outputBuffers[i];
+            camera_stream_buffer_t sb = b.buffer;
             sb.status = CAMERA_BUFFER_STATUS_ERROR;
-            stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
-                    timestampIncreasing, std::vector<size_t> (),
-                    inResultExtras.frameNumber, transform);
+            b.stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
+                    b.timestampIncreasing, std::vector<size_t> (),
+                    b.resultExtras.frameNumber, b.transform);
 
             if (listener != nullptr) {
-                CaptureResultExtras extras = inResultExtras;
+                CaptureResultExtras extras = b.resultExtras;
                 extras.errorStreamId = streamId;
                 listener->notifyError(
                         hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
                         extras);
             }
         }
+
     }
 }
 
-void returnAndRemovePendingOutputBuffers(bool useHalBufManager,
+void collectAndRemovePendingOutputBuffers(bool useHalBufManager,
+        const std::set<int32_t> &halBufferManagedStreams,
         sp<NotificationListener> listener, InFlightRequest& request,
-        SessionStatsBuilder& sessionStatsBuilder) {
+        SessionStatsBuilder& sessionStatsBuilder,
+        std::vector<BufferToReturn> *returnableBuffers) {
     bool timestampIncreasing =
             !((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
     nsecs_t readoutTimestamp = request.resultExtras.hasReadoutTimestamp ?
             request.resultExtras.readoutTimestamp : 0;
-    returnOutputBuffers(useHalBufManager, listener,
+    collectReturnableOutputBuffers(useHalBufManager, halBufferManagedStreams, listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(),
             request.shutterTimestamp, readoutTimestamp,
-            /*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
+            /*requested*/true, request.requestTimeNs, sessionStatsBuilder,
+            /*out*/ returnableBuffers,
+            timestampIncreasing,
             request.outputSurfaces, request.resultExtras,
             request.errorBufStrategy, request.transform);
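
The returnOutputBuffers() path is split into collectReturnableOutputBuffers(), which only gathers BufferToReturn entries while the in-flight lock is held, and finishReturningOutputBuffers(), which performs the slow per-buffer returns (binder calls) afterwards, gated by flags::return_buffers_outside_locks(). A minimal sketch of that collect-then-flush pattern (not part of the patch) with simplified stand-in types:

```cpp
// Illustrative sketch of "collect under the lock, return buffers outside the lock".
#include <iostream>
#include <mutex>
#include <vector>

struct BufferToReturn { int streamId; long frameNumber; };

std::mutex gInFlightLock;
std::vector<BufferToReturn> gPending = {{0, 41}, {1, 41}};

// Phase 1: under the in-flight lock, only move entries into a local list.
void collectReturnableBuffers(std::vector<BufferToReturn>* out) {
    std::lock_guard<std::mutex> lock(gInFlightLock);
    out->insert(out->end(), gPending.begin(), gPending.end());
    gPending.clear();
}

// Phase 2: with no lock held, do the slow per-buffer work (binder calls in the real code).
void finishReturningBuffers(const std::vector<BufferToReturn>& buffers) {
    for (const auto& b : buffers) {
        std::cout << "return buffer stream=" << b.streamId
                  << " frame=" << b.frameNumber << "\n";
    }
}

int main() {
    std::vector<BufferToReturn> returnable;
    collectReturnableBuffers(&returnable);
    finishReturningBuffers(returnable);  // may block without stalling lock holders
    return 0;
}
```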
 
@@ -974,6 +1014,9 @@
     ATRACE_CALL();
     ssize_t idx;
 
+    std::vector<BufferToReturn> returnableBuffers{};
+    CaptureResultExtras pendingNotificationResultExtras{};
+
     // Set timestamp for the request in the in-flight tracking
     // and get the request ID to send upstream
     {
@@ -1040,9 +1083,13 @@
                             states.lastCompletedReprocessFrameNumber;
                     r.resultExtras.lastCompletedZslFrameNumber =
                             states.lastCompletedZslFrameNumber;
-                    states.listener->notifyShutter(r.resultExtras, msg.timestamp);
+                    if (flags::return_buffers_outside_locks()) {
+                        pendingNotificationResultExtras = r.resultExtras;
+                    } else {
+                        states.listener->notifyShutter(r.resultExtras, msg.timestamp);
+                    }
                 }
-                // send pending result and buffers
+                // send pending result and buffers; this queues them up for delivery later
                 const auto& cameraIdsWithZoom = getCameraIdsWithZoomLocked(
                         inflightMap, r.pendingMetadata, r.cameraIdsWithZoom);
                 sendCaptureResult(states,
@@ -1051,16 +1098,35 @@
                     r.hasInputBuffer, r.zslCapture && r.stillCapture,
                     r.rotateAndCropAuto, cameraIdsWithZoom, r.physicalMetadatas);
             }
-            returnAndRemovePendingOutputBuffers(
-                    states.useHalBufManager, states.listener, r, states.sessionStatsBuilder);
+            collectAndRemovePendingOutputBuffers(
+                    states.useHalBufManager, states.halBufManagedStreamIds,
+                    states.listener, r, states.sessionStatsBuilder, &returnableBuffers);
 
-            removeInFlightRequestIfReadyLocked(states, idx);
+            if (!flags::return_buffers_outside_locks()) {
+                finishReturningOutputBuffers(returnableBuffers,
+                        states.listener, states.sessionStatsBuilder);
+            }
+
+            removeInFlightRequestIfReadyLocked(states, idx, &returnableBuffers);
+
         }
     }
     if (idx < 0) {
         SET_ERR("Shutter notification for non-existent frame number %d",
                 msg.frame_number);
     }
+    // Call notifyShutter outside of in-flight mutex
+    if (flags::return_buffers_outside_locks() && pendingNotificationResultExtras.isValid()) {
+        states.listener->notifyShutter(pendingNotificationResultExtras, msg.timestamp);
+    }
+
+    // With no locks held, finish returning buffers to streams, which may take a while since
+    // binder calls are involved
+    if (flags::return_buffers_outside_locks()) {
+        finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+    }
+
 }
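
notifyShutter() is deferred the same way: the result extras are captured while the in-flight lock is held and the listener is invoked only after the lock is released. A small sketch (not part of the patch) of that capture-then-notify pattern, with stand-in listener and extras types:

```cpp
// Illustrative sketch of capturing a notification under a lock and delivering it after.
#include <iostream>
#include <mutex>
#include <optional>

struct ResultExtras {
    long frameNumber = -1;
    bool isValid() const { return frameNumber >= 0; }
};

struct Listener {
    void notifyShutter(const ResultExtras& e, long ts) {
        std::cout << "shutter frame=" << e.frameNumber << " ts=" << ts << "\n";
    }
};

std::mutex gInFlightLock;

void onShutter(Listener& listener, long frameNumber, long timestamp) {
    std::optional<ResultExtras> pending;
    {
        std::lock_guard<std::mutex> lock(gInFlightLock);
        // Look up and update in-flight state here; only remember what to notify.
        pending = ResultExtras{frameNumber};
    }
    // The (potentially slow) listener callback runs with no lock held.
    if (pending && pending->isValid()) {
        listener.notifyShutter(*pending, timestamp);
    }
}

int main() {
    Listener l;
    onShutter(l, 42, 1000000);
    return 0;
}
```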
 
 void notifyError(CaptureOutputStates& states, const camera_error_msg_t &msg) {
@@ -1106,6 +1172,8 @@
             break;
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+        {
+            std::vector<BufferToReturn> returnableBuffers{};
             {
                 std::lock_guard<std::mutex> l(states.inflightLock);
                 ssize_t idx = states.inflightMap.indexOfKey(msg.frame_number);
@@ -1142,7 +1210,12 @@
 
                         // Check whether the buffers returned. If they returned,
                         // remove inflight request.
-                        removeInFlightRequestIfReadyLocked(states, idx);
+                        removeInFlightRequestIfReadyLocked(states, idx, &returnableBuffers);
+                        if (!flags::return_buffers_outside_locks()) {
+                            finishReturningOutputBuffers(returnableBuffers,
+                                    states.listener, states.sessionStatsBuilder);
+                        }
+
                     }
                 } else {
                     resultExtras.frameNumber = msg.frame_number;
@@ -1151,6 +1224,12 @@
                             resultExtras.frameNumber);
                 }
             }
+
+            if (flags::return_buffers_outside_locks()) {
+                finishReturningOutputBuffers(returnableBuffers,
+                        states.listener, states.sessionStatsBuilder);
+            }
+
             resultExtras.errorStreamId = streamId;
             if (states.listener != nullptr) {
                 states.listener->notifyError(errorCode, resultExtras);
@@ -1159,6 +1238,7 @@
                         states.cameraId.c_str(), __FUNCTION__);
             }
             break;
+        }
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
             // Do not depend on HAL ERROR_CAMERA_BUFFER to send buffer error
             // callback to the app. Rather, use STATUS_ERROR of image buffers.
@@ -1188,17 +1268,24 @@
 
 void flushInflightRequests(FlushInflightReqStates& states) {
     ATRACE_CALL();
+    std::vector<BufferToReturn> returnableBuffers{};
     { // First return buffers cached in inFlightMap
         std::lock_guard<std::mutex> l(states.inflightLock);
         for (size_t idx = 0; idx < states.inflightMap.size(); idx++) {
             const InFlightRequest &request = states.inflightMap.valueAt(idx);
-            returnOutputBuffers(
-                states.useHalBufManager, states.listener,
+            collectReturnableOutputBuffers(
+                states.useHalBufManager, states.halBufManagedStreamIds,
+                states.listener,
                 request.pendingOutputBuffers.array(),
                 request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
                 /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+                /*out*/ &returnableBuffers,
                 /*timestampIncreasing*/true, request.outputSurfaces, request.resultExtras,
                 request.errorBufStrategy);
+            if (!flags::return_buffers_outside_locks()) {
+                finishReturningOutputBuffers(returnableBuffers,
+                        states.listener, states.sessionStatsBuilder);
+            }
             ALOGW("%s: Frame %d |  Timestamp: %" PRId64 ", metadata"
                     " arrived: %s, buffers left: %d.\n", __FUNCTION__,
                     states.inflightMap.keyAt(idx), request.shutterTimestamp,
@@ -1209,6 +1296,10 @@
         states.inflightMap.clear();
         states.inflightIntf.onInflightMapFlushedLocked();
     }
+    if (flags::return_buffers_outside_locks()) {
+        finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+    }
 
     // Then return all inflight buffers not returned by HAL
     std::vector<std::pair<int32_t, int32_t>> inflightKeys;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 134c037..75864d7 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -44,15 +44,50 @@
      * Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
      */
 
-    // helper function to return the output buffers to output streams. The
-    // function also optionally calls notify(ERROR_BUFFER).
-    void returnOutputBuffers(
+    struct BufferToReturn {
+        Camera3StreamInterface *stream;
+        camera_stream_buffer_t buffer;
+        nsecs_t timestamp;
+        nsecs_t readoutTimestamp;
+        bool timestampIncreasing;
+        std::vector<size_t> surfaceIds;
+        const CaptureResultExtras resultExtras;
+        int32_t transform;
+        nsecs_t requestTimeNs;
+
+        BufferToReturn(Camera3StreamInterface *stream,
+                camera_stream_buffer_t buffer,
+                nsecs_t timestamp, nsecs_t readoutTimestamp,
+                bool timestampIncreasing, std::vector<size_t> surfaceIds,
+                const CaptureResultExtras &resultExtras,
+                int32_t transform, nsecs_t requestTimeNs):
+            stream(stream),
+            buffer(buffer),
+            timestamp(timestamp),
+            readoutTimestamp(readoutTimestamp),
+            timestampIncreasing(timestampIncreasing),
+            surfaceIds(surfaceIds),
+            resultExtras(resultExtras),
+            transform(transform),
+            requestTimeNs(requestTimeNs) {}
+    };
+
+    // helper function to collect the output buffers to be returned to
+    // output streams. The function also optionally calls
+    // notify(ERROR_BUFFER). Returns the list of buffers to hand back
+    // to streams in returnableBuffers. Does not make any two-way
+    // binder calls, so it is suitable for use while critical locks
+    // are held.
+    void collectReturnableOutputBuffers(
             bool useHalBufManager,
+            const std::set<int32_t> &halBufferManagedStreams,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
             const camera_stream_buffer_t *outputBuffers,
             size_t numBuffers, nsecs_t timestamp,
             nsecs_t readoutTimestamp, bool requested, nsecs_t requestTimeNs,
-            SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing = true,
+            SessionStatsBuilder& sessionStatsBuilder,
+            /*out*/ std::vector<BufferToReturn> *returnableBuffers,
+            bool timestampIncreasing = true,
             // The following arguments are only meant for surface sharing use case
             const SurfaceMap& outputSurfaces = SurfaceMap{},
             // Used to send buffer error callback when failing to return buffer
@@ -60,13 +95,24 @@
             ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN,
             int32_t transform = -1);
 
-    // helper function to return the output buffers to output streams, and
-    // remove the returned buffers from the inflight request's pending buffers
-    // vector.
-    void returnAndRemovePendingOutputBuffers(
+    // helper function to collect the output buffers ready to be
+    // returned to output streams, and to remove these buffers from
+    // the inflight request's pending buffers vector. Does not make
+    // any two-way binder calls, so it is suitable for use while
+    // critical locks are held.
+    void collectAndRemovePendingOutputBuffers(
             bool useHalBufManager,
+            const std::set<int32_t> &halBufferManagedStreams,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
-            InFlightRequest& request, SessionStatsBuilder& sessionStatsBuilder);
+            InFlightRequest& request, SessionStatsBuilder& sessionStatsBuilder,
+            /*out*/ std::vector<BufferToReturn> *returnableBuffers);
+
+    // Actually return filled output buffers to the consumer, using the list
+    // provided by collectReturnableOutputBuffers / collectAndRemovePendingOutputBuffers.
+    // Makes two-way binder calls to applications, so do not hold any critical locks when
+    // calling.
+    void finishReturningOutputBuffers(const std::vector<BufferToReturn> &returnableBuffers,
+            sp<NotificationListener> listener, SessionStatsBuilder& sessionStatsBuilder);
 
     // Camera3Device/Camera3OfflineSession internal states used in notify/processCaptureResult
     // callbacks
@@ -87,6 +133,7 @@
         uint32_t& nextReprocResultFrameNum;
         uint32_t& nextZslResultFrameNum; // end of outputLock scope
         const bool useHalBufManager;
+        const std::set<int32_t> &halBufManagedStreamIds;
         const bool usePartialResult;
         const bool needFixupMonoChrome;
         const uint32_t numPartialResults;
@@ -118,6 +165,7 @@
         const std::string& cameraId;
         std::mutex& reqBufferLock; // lock to serialize request buffer calls
         const bool useHalBufManager;
+        const std::set<int32_t> &halBufManagedStreamIds;
         StreamSet& outputStreams;
         SessionStatsBuilder& sessionStatsBuilder;
         SetErrorInterface& setErrIntf;
@@ -128,6 +176,7 @@
     struct ReturnBufferStates {
         const std::string& cameraId;
         const bool useHalBufManager;
+        const std::set<int32_t> &halBufManagedStreamIds;
         StreamSet& outputStreams;
         SessionStatsBuilder& sessionStatsBuilder;
         BufferRecordsInterface& bufferRecordsIntf;
@@ -138,6 +187,7 @@
         std::mutex& inflightLock;
         InFlightRequestMap& inflightMap; // end of inflightLock scope
         const bool useHalBufManager;
+        const std::set<int32_t> &halBufManagedStreamIds;
         sp<NotificationListener> listener;
         InflightRequestUpdateInterface& inflightIntf;
         BufferRecordsInterface& bufferRecordsIntf;
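
The header comments above describe a two-phase pattern: the collect* helpers run with the in-flight lock held and only do bookkeeping, while finishReturningOutputBuffers later makes the two-way binder calls with no locks held. A minimal standalone sketch of that pattern follows; PendingReturn, Session, and finishUnlocked are illustrative stand-ins, not names from this change.

    #include <cstdint>
    #include <mutex>
    #include <vector>

    // Stand-in for the per-buffer state captured under the lock (cf. BufferToReturn).
    struct PendingReturn {
        int32_t streamId;
        int64_t timestampNs;
    };

    class Session {
      public:
        void onCaptureResult(int32_t streamId, int64_t timestampNs) {
            std::vector<PendingReturn> returnable;
            {
                std::lock_guard<std::mutex> lock(mInflightLock);
                // Phase 1: only collect state while the lock is held -- no binder calls.
                returnable.push_back({streamId, timestampNs});
            }
            // Phase 2: the potentially slow, two-way calls run with no locks held.
            finishUnlocked(returnable);
        }

      private:
        void finishUnlocked(const std::vector<PendingReturn>& buffers) {
            for (const auto& b : buffers) {
                // The real code returns each buffer to its stream and updates session stats here.
                (void)b;
            }
        }

        std::mutex mInflightLock;
    };

When the return_buffers_outside_locks flag is off, the hunks above instead flush the collected list before the lock is released, which preserves the legacy ordering.
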
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
index 3ac666b..aca7a67 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
@@ -32,13 +32,17 @@
 
 #include <camera/CameraUtils.h>
 #include <camera_metadata_hidden.h>
+#include <com_android_internal_camera_flags.h>
 
 #include "device3/Camera3OutputUtils.h"
+#include "utils/SessionConfigurationUtils.h"
 
 #include "system/camera_metadata.h"
 
 using namespace android::camera3;
+using namespace android::camera3::SessionConfigurationUtils;
 using namespace android::hardware::camera;
+namespace flags = com::android::internal::camera::flags;
 
 namespace android {
 namespace camera3 {
@@ -207,7 +211,9 @@
 
         bool noBufferReturned = false;
         buffer_handle_t *buffer = nullptr;
-        if (states.useHalBufManager) {
+        if (states.useHalBufManager ||
+                (flags::session_hal_buf_manager() &&
+                        contains(states.halBufManagedStreamIds, bSrc.streamId))) {
             // This is suspicious most of the time but can be correct during flush where HAL
             // has to return capture result before a buffer is requested
             if (bSrc.bufferId == BUFFER_ID_NO_BUFFER) {
@@ -294,13 +300,15 @@
 template <class VecStreamBufferType>
 void returnStreamBuffersT(ReturnBufferStates& states,
         const VecStreamBufferType& buffers) {
-    if (!states.useHalBufManager) {
-        ALOGE("%s: Camera %s does not support HAL buffer managerment",
-                __FUNCTION__, states.cameraId.c_str());
-        return;
-    }
 
     for (const auto& buf : buffers) {
+        if (!states.useHalBufManager &&
+            !(flags::session_hal_buf_manager() &&
+             contains(states.halBufManagedStreamIds, buf.streamId))) {
+            ALOGE("%s: Camera %s does not support HAL buffer management for stream id %d",
+                  __FUNCTION__, states.cameraId.c_str(), buf.streamId);
+            return;
+        }
         if (buf.bufferId == BUFFER_ID_NO_BUFFER) {
             ALOGE("%s: cannot return a buffer without bufferId", __FUNCTION__);
             continue;
@@ -337,9 +345,15 @@
             continue;
         }
         streamBuffer.stream = stream->asHalStream();
-        returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*readoutTimestamp*/0,
-                /*requested*/false, /*requestTimeNs*/0, states.sessionStatsBuilder);
+        std::vector<BufferToReturn> returnableBuffers{};
+        collectReturnableOutputBuffers(states.useHalBufManager, states.halBufManagedStreamIds,
+                /*listener*/nullptr, &streamBuffer, /*size*/1, /*timestamp*/ 0,
+                /*readoutTimestamp*/0, /*requested*/false, /*requestTimeNs*/0,
+                states.sessionStatsBuilder,
+                /*out*/&returnableBuffers);
+        finishReturningOutputBuffers(returnableBuffers, /*listener*/ nullptr,
+                states.sessionStatsBuilder);
+
     }
 }
 
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index a7bd312..3626f20 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -35,6 +35,7 @@
     uint32_t operation_mode;
     bool input_is_multi_resolution;
     bool use_hal_buf_manager = false;
+    std::set<int32_t> hal_buffer_managed_streams;
 } camera_stream_configuration_t;
 
 typedef struct camera_capture_request {
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 97475f0..13c500f 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -59,6 +59,7 @@
 #include <com_android_internal_camera_flags.h>
 
 #include "utils/CameraTraces.h"
+#include "utils/SessionConfigurationUtils.h"
 #include "mediautils/SchedulingPolicyService.h"
 #include "device3/Camera3OutputStream.h"
 #include "device3/Camera3InputStream.h"
@@ -79,6 +80,7 @@
 #include "AidlCamera3Device.h"
 
 using namespace android::camera3;
+using namespace android::camera3::SessionConfigurationUtils;
 using namespace aidl::android::hardware;
 using aidl::android::hardware::camera::metadata::SensorPixelMode;
 using aidl::android::hardware::camera::metadata::RequestAvailableDynamicRangeProfilesMap;
@@ -337,6 +339,16 @@
 
     mBatchSizeLimitEnabled = (deviceVersion >= CAMERA_DEVICE_API_VERSION_1_2);
 
+    camera_metadata_entry readoutSupported = mDeviceInfo.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
+    if (readoutSupported.count == 0) {
+        ALOGW("%s: Could not find value corresponding to ANDROID_SENSOR_READOUT_TIMESTAMP. "
+              "Assuming true.", __FUNCTION__);
+        mSensorReadoutTimestampSupported = true;
+    } else {
+        mSensorReadoutTimestampSupported =
+                readoutSupported.data.u8[0] == ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
+    }
+
     return initializeCommonLocked();
 }
 
@@ -400,7 +412,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -442,7 +454,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -450,7 +462,7 @@
         mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
-        camera3::notify(states, msg);
+        camera3::notify(states, msg, mSensorReadoutTimestampSupported);
     }
     return ::ndk::ScopedAStatus::ok();
 
@@ -531,7 +543,7 @@
         }
 
         // When not using HAL buf manager, only allow streams requested by app to be preserved
-        if (!mUseHalBufManager) {
+        if (!isHalBufferManagedStream(id)) {
             if (std::find(streamsToKeep.begin(), streamsToKeep.end(), id) == streamsToKeep.end()) {
                 SET_ERR("stream ID %d must not be switched to offline!", id);
                 return UNKNOWN_ERROR;
@@ -611,17 +623,18 @@
     // TODO: check if we need to lock before copying states
     //       though technically no other thread should be talking to Camera3Device at this point
     Camera3OfflineStates offlineStates(
-            mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
-            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
-            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
-            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
-            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
-            mZoomRatioMappers, mRotateAndCropMappers);
+            mTagMonitor, mVendorTagId, mUseHalBufManager, mHalBufManagedStreamIds,
+            mNeedFixupMonochromeTags, mUsePartialResult, mNumPartialResults,
+            mLastCompletedRegularFrameNumber, mLastCompletedReprocessFrameNumber,
+            mLastCompletedZslFrameNumber, mNextResultFrameNumber,
+            mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+            mNextShutterFrameNumber, mNextReprocessShutterFrameNumber,
+            mNextZslStillShutterFrameNumber, mDeviceInfo, mPhysicalDeviceInfoMap,
+            mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers);
 
     *session = new AidlCamera3OfflineSession(mId, inputStream, offlineStreamSet,
-            std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
+                                             std::move(bufferRecords), offlineReqs, offlineStates,
+                                             offlineSession, mSensorReadoutTimestampSupported);
 
     // Delete all streams that has been transferred to offline session
     Mutex::Autolock l(mLock);
@@ -688,8 +701,8 @@
         aidl::android::hardware::camera::device::BufferRequestStatus* status) {
 
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams,
-        mSessionStatsBuilder, *this, *(mInterface), *this};
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mHalBufManagedStreamIds,
+        mOutputStreams, mSessionStatsBuilder, *this, *(mInterface), *this};
     camera3::requestStreamBuffers(states, bufReqs, outBuffers, status);
     return ::ndk::ScopedAStatus::ok();
 }
@@ -713,7 +726,7 @@
 ::ndk::ScopedAStatus AidlCamera3Device::returnStreamBuffers(
         const std::vector<camera::device::StreamBuffer>& buffers) {
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams,  mSessionStatsBuilder,
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams,  mSessionStatsBuilder,
         *(mInterface)};
     camera3::returnStreamBuffers(states, buffers);
     return ::ndk::ScopedAStatus::ok();
@@ -900,11 +913,18 @@
     std::set<int> activeStreams;
     camera::device::StreamConfiguration requestedConfiguration;
     requestedConfiguration.streams.resize(config->num_streams);
+    config->use_hal_buf_manager = mUseHalBufManager;
     for (size_t i = 0; i < config->num_streams; i++) {
         camera::device::Stream &dst = requestedConfiguration.streams[i];
         camera3::camera_stream_t *src = config->streams[i];
 
         Camera3Stream* cam3stream = Camera3Stream::cast(src);
+        // For stream configurations with multi-res streams, the HAL buffer manager has to be used.
+        if (!flags::session_hal_buf_manager() && cam3stream->getHalStreamGroupId() != -1 &&
+                src->stream_type != CAMERA_STREAM_INPUT) {
+            mUseHalBufManager = true;
+            config->use_hal_buf_manager = true;
+        }
         cam3stream->setBufferFreedListener(this);
         int streamId = cam3stream->getId();
         StreamType streamType;
@@ -975,31 +995,38 @@
     requestedConfiguration.multiResolutionInputImage = config->input_is_multi_resolution;
     requestedConfiguration.logId = logId;
     ndk::ScopedAStatus err = ndk::ScopedAStatus::ok();
+    int32_t interfaceVersion = 0;
     camera::device::ConfigureStreamsRet configureStreamsRet;
-    if (flags::session_hal_buf_manager()) {
-        int32_t interfaceVersion = 0;
-        err = mAidlSession->getInterfaceVersion(&interfaceVersion);
-        if (!err.isOk()) {
-            ALOGE("%s: Transaction error getting interface version: %s", __FUNCTION__,
-                    err.getMessage());
-            return AidlProviderInfo::mapToStatusT(err);
-        }
-        if (interfaceVersion >= AIDL_DEVICE_SESSION_V3 && mSupportSessionHalBufManager) {
-            err = mAidlSession->configureStreamsV2(requestedConfiguration, &configureStreamsRet);
-            finalConfiguration = std::move(configureStreamsRet.halStreams);
-        } else {
-            err = mAidlSession->configureStreams(requestedConfiguration, &finalConfiguration);
-        }
+    err = mAidlSession->getInterfaceVersion(&interfaceVersion);
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error getting interface version: %s", __FUNCTION__,
+              err.getMessage());
+        return AidlProviderInfo::mapToStatusT(err);
+    }
+    if (flags::session_hal_buf_manager() && interfaceVersion >= AIDL_DEVICE_SESSION_V3
+            && mSupportSessionHalBufManager) {
+        err = mAidlSession->configureStreamsV2(requestedConfiguration, &configureStreamsRet);
+        finalConfiguration = std::move(configureStreamsRet.halStreams);
     } else {
         err = mAidlSession->configureStreams(requestedConfiguration, &finalConfiguration);
     }
+
     if (!err.isOk()) {
         ALOGE("%s: Transaction error: %s", __FUNCTION__, err.getMessage());
         return AidlProviderInfo::mapToStatusT(err);
     }
-    if (flags::session_hal_buf_manager() && mSupportSessionHalBufManager) {
-        mUseHalBufManager = configureStreamsRet.enableHalBufferManager;
-        config->use_hal_buf_manager = configureStreamsRet.enableHalBufferManager;
+
+    if (flags::session_hal_buf_manager()) {
+        std::set<int32_t> halBufferManagedStreamIds;
+        for (const auto &halStream: finalConfiguration) {
+            if ((interfaceVersion >= AIDL_DEVICE_SESSION_V3 &&
+                    mSupportSessionHalBufManager && halStream.enableHalBufferManager)
+                    || mUseHalBufManager) {
+                halBufferManagedStreamIds.insert(halStream.id);
+            }
+        }
+        mHalBufManagedStreamIds = std::move(halBufferManagedStreamIds);
+        config->hal_buffer_managed_streams = mHalBufManagedStreamIds;
     }
     // And convert output stream configuration from AIDL
     for (size_t i = 0; i < config->num_streams; i++) {
@@ -1070,9 +1097,9 @@
             }
             dstStream->setUsage(
                     mapProducerToFrameworkUsage(src.producerUsage));
-
             if (flags::session_hal_buf_manager()) {
-                dstStream->setHalBufferManager(mUseHalBufManager);
+                dstStream->setHalBufferManager(
+                        contains(config->hal_buffer_managed_streams, streamId));
             }
         }
         dst->max_buffers = src.maxBuffers;
@@ -1396,7 +1423,7 @@
                     handlesCreated->push_back(acquireFence);
                 }
                 dst.acquireFence = camera3::dupToAidlIfNotNull(acquireFence);
-            } else if (mUseHalBufManager) {
+            } else if (isHalBufferManagedStream(streamId)) {
                 // HAL buffer management path
                 dst.bufferId = BUFFER_ID_NO_BUFFER;
                 dst.buffer = aidl::android::hardware::common::NativeHandle();
@@ -1410,7 +1437,7 @@
             dst.releaseFence = aidl::android::hardware::common::NativeHandle();
 
             // Output buffers are empty when using HAL buffer manager
-            if (!mUseHalBufManager) {
+            if (!isHalBufferManagedStream(streamId)) {
                 mBufferRecords.pushInflightBuffer(
                         captureRequest->frameNumber, streamId, src->buffer);
                 inflightBuffers->push_back(std::make_pair(captureRequest->frameNumber, streamId));
@@ -1456,8 +1483,9 @@
                 bool supportCameraMute,
                 bool overrideToPortrait,
                 bool supportSettingsOverride) :
-          RequestThread(parent, statusTracker, interface, sessionParamKeys, useHalBufManager,
-                  supportCameraMute, overrideToPortrait, supportSettingsOverride) {}
+          RequestThread(parent, statusTracker, interface, sessionParamKeys,
+                  useHalBufManager, supportCameraMute, overrideToPortrait,
+                  supportSettingsOverride) {}
 
 status_t AidlCamera3Device::AidlRequestThread::switchToOffline(
         const std::vector<int32_t>& streamsToKeep,
@@ -1690,7 +1718,8 @@
                 bool overrideToPortrait,
                 bool supportSettingsOverride) {
     return new AidlRequestThread(parent, statusTracker, interface, sessionParamKeys,
-            useHalBufManager, supportCameraMute, overrideToPortrait, supportSettingsOverride);
+            useHalBufManager, supportCameraMute, overrideToPortrait,
+            supportSettingsOverride);
 };
 
 sp<Camera3Device::Camera3DeviceInjectionMethods>
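
With the session_hal_buf_manager flag enabled and a HAL that implements configureStreamsV2, the code above derives a per-stream set of HAL-buffer-managed stream ids instead of relying solely on the device-wide mUseHalBufManager flag. A simplified standalone sketch of that selection, with HalStreamInfo and pickHalManagedStreams as illustrative stand-ins (the real condition also checks the session interface version):

    #include <cstdint>
    #include <set>
    #include <vector>

    struct HalStreamInfo {
        int32_t id;
        bool enableHalBufferManager;  // per-stream opt-in reported by configureStreamsV2
    };

    std::set<int32_t> pickHalManagedStreams(const std::vector<HalStreamInfo>& halStreams,
                                            bool sessionHalBufManagerSupported,
                                            bool deviceWideHalBufManager) {
        std::set<int32_t> managed;
        for (const auto& s : halStreams) {
            if ((sessionHalBufManagerSupported && s.enableHalBufferManager) ||
                    deviceWideHalBufManager) {
                managed.insert(s.id);
            }
        }
        return managed;
    }

Request building and buffer return then consult isHalBufferManagedStream(streamId) per stream, which is what the BUFFER_ID_NO_BUFFER and pushInflightBuffer paths above switch to.
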
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
index 90e2f97..f0a5f7e 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
@@ -289,6 +289,9 @@
     // capture requests.
     bool mBatchSizeLimitEnabled = false;
 
+    // Whether the HAL supports reporting sensor readout timestamp
+    bool mSensorReadoutTimestampSupported = true;
+
 }; // class AidlCamera3Device
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
index 01c4e88..f8308df 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
@@ -122,7 +122,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -169,7 +169,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -177,7 +177,7 @@
         /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
-        camera3::notify(states, msg);
+        camera3::notify(states, msg, mSensorReadoutTimestampSupported);
     }
     return ::ndk::ScopedAStatus::ok();
 }
@@ -208,7 +208,8 @@
     }
 
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager,
+        mHalBufManagedStreamIds, mOutputStreams, mSessionStatsBuilder,
         *this, mBufferRecords, *this};
     camera3::requestStreamBuffers(states, bufReqs, buffers, status);
     return ::ndk::ScopedAStatus::ok();
@@ -241,7 +242,7 @@
     }
 
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams, mSessionStatsBuilder,
         mBufferRecords};
 
     camera3::returnStreamBuffers(states, buffers);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
index 33b638c..f8fdeb9 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
@@ -105,19 +105,20 @@
     };
 
     // initialize by Camera3Device.
-    explicit AidlCamera3OfflineSession(const std::string& id,
-            const sp<camera3::Camera3Stream>& inputStream,
-            const camera3::StreamSet& offlineStreamSet,
-            camera3::BufferRecords&& bufferRecords,
+    explicit AidlCamera3OfflineSession(
+            const std::string& id, const sp<camera3::Camera3Stream>& inputStream,
+            const camera3::StreamSet& offlineStreamSet, camera3::BufferRecords&& bufferRecords,
             const camera3::InFlightRequestMap& offlineReqs,
             const Camera3OfflineStates& offlineStates,
             std::shared_ptr<aidl::android::hardware::camera::device::ICameraOfflineSession>
-                    offlineSession) :
-      Camera3OfflineSession(id, inputStream, offlineStreamSet, std::move(bufferRecords),
-              offlineReqs, offlineStates),
-      mSession(offlineSession) {
-        mCallbacks = ndk::SharedRefBase::make<AidlCameraDeviceCallbacks>(this);
-      };
+                    offlineSession,
+            bool sensorReadoutTimestampSupported)
+        : Camera3OfflineSession(id, inputStream, offlineStreamSet, std::move(bufferRecords),
+                                offlineReqs, offlineStates),
+          mSession(offlineSession),
+          mSensorReadoutTimestampSupported(sensorReadoutTimestampSupported) {
+            mCallbacks = ndk::SharedRefBase::make<AidlCameraDeviceCallbacks>(this);
+    };
 
     /**
      * End of CameraOfflineSessionBase interface
@@ -130,6 +131,8 @@
 
     std::shared_ptr<AidlCameraDeviceCallbacks> mCallbacks;
 
+    bool mSensorReadoutTimestampSupported;
+
     virtual void closeSessionLocked() override;
 
     virtual void releaseSessionLocked() override;
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
index 74d4230..d9c8e57 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
@@ -67,7 +67,8 @@
 }
 
 void notify(CaptureOutputStates& states,
-        const aidl::android::hardware::camera::device::NotifyMsg& msg) {
+            const aidl::android::hardware::camera::device::NotifyMsg& msg,
+            bool hasReadoutTimestamp) {
 
     using ErrorCode = aidl::android::hardware::camera::device::ErrorCode;
     using Tag = aidl::android::hardware::camera::device::NotifyMsg::Tag;
@@ -110,8 +111,9 @@
             m.type = CAMERA_MSG_SHUTTER;
             m.message.shutter.frame_number = msg.get<Tag::shutter>().frameNumber;
             m.message.shutter.timestamp = msg.get<Tag::shutter>().timestamp;
-            m.message.shutter.readout_timestamp_valid = true;
-            m.message.shutter.readout_timestamp = msg.get<Tag::shutter>().readoutTimestamp;
+            m.message.shutter.readout_timestamp_valid = hasReadoutTimestamp;
+            m.message.shutter.readout_timestamp =
+                    hasReadoutTimestamp ? msg.get<Tag::shutter>().readoutTimestamp : 0LL;
             break;
     }
     notify(states, &m);
@@ -143,12 +145,6 @@
     std::lock_guard<std::mutex> lock(states.reqBufferLock);
     std::vector<StreamBufferRet> bufRets;
     outBuffers->clear();
-    if (!states.useHalBufManager) {
-        ALOGE("%s: Camera %s does not support HAL buffer management",
-                __FUNCTION__, states.cameraId.c_str());
-        *status = BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS;
-        return;
-    }
 
     SortedVector<int32_t> streamIds;
     ssize_t sz = streamIds.setCapacity(bufReqs.size());
@@ -174,6 +170,13 @@
             *status = BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS;
             return;
         }
+        if (!states.useHalBufManager &&
+                !contains(states.halBufManagedStreamIds, bufReq.streamId)) {
+            ALOGE("%s: Camera %s does not support HAL buffer management for stream id %d",
+                  __FUNCTION__, states.cameraId.c_str(), bufReq.streamId);
+            *status = BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS;
+            return;
+        }
         streamIds.add(bufReq.streamId);
     }
 
@@ -316,10 +319,15 @@
                 sb.acquire_fence = -1;
                 sb.status = CAMERA_BUFFER_STATUS_ERROR;
             }
-            returnOutputBuffers(states.useHalBufManager, nullptr,
-                    streamBuffers.data(), numAllocatedBuffers, 0,
-                    0, false,
-                    0, states.sessionStatsBuilder);
+            std::vector<BufferToReturn> returnableBuffers{};
+            collectReturnableOutputBuffers(states.useHalBufManager, states.halBufManagedStreamIds,
+                    /*listener*/ nullptr,
+                    streamBuffers.data(), numAllocatedBuffers, /*timestamp*/ 0,
+                    /*readoutTimestamp*/ 0, /*requested*/ false,
+                    /*requestTimeNs*/ 0, states.sessionStatsBuilder,
+                    /*out*/ &returnableBuffers);
+            finishReturningOutputBuffers(returnableBuffers, /*listener*/ nullptr,
+                    states.sessionStatsBuilder);
             for (auto buf : newBuffers) {
                 states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
             }
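
The notify() change above only forwards the HAL's readout timestamp when the device advertises hardware readout timestamp support, detected from ANDROID_SENSOR_READOUT_TIMESTAMP at initialization and defaulting to supported when the tag is absent. A reduced sketch of both halves, using illustrative stand-in types rather than the framework ones:

    #include <cstdint>
    #include <optional>

    // Stand-in for the ANDROID_SENSOR_READOUT_TIMESTAMP static metadata values.
    enum ReadoutTimestampCap : uint8_t { NOT_SUPPORTED = 0, HARDWARE = 1 };

    bool sensorReadoutTimestampSupported(std::optional<uint8_t> staticEntry) {
        // Missing entry: older HALs predate the tag, so assume support (the ALOGW path above).
        if (!staticEntry.has_value()) return true;
        return *staticEntry == HARDWARE;
    }

    struct ShutterOut {
        bool readoutTimestampValid;
        int64_t readoutTimestamp;
    };

    ShutterOut mapReadout(int64_t halReadoutTimestamp, bool hasReadoutTimestamp) {
        // Forward the HAL readout timestamp only when the device reports support for it.
        return {hasReadoutTimestamp, hasReadoutTimestamp ? halReadoutTimestamp : 0};
    }
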
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h
index e795624..d3a8ede 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h
@@ -79,11 +79,8 @@
                     &physicalCameraMetadata);
 
     void notify(CaptureOutputStates& states,
-        const aidl::android::hardware::camera::device::NotifyMsg& msg,
-        bool hasReadoutTime, uint64_t readoutTime);
-
-    void notify(CaptureOutputStates& states,
-        const aidl::android::hardware::camera::device::NotifyMsg& msg);
+            const aidl::android::hardware::camera::device::NotifyMsg& msg,
+            bool hasReadoutTimestamp);
 
     void requestStreamBuffers(RequestBufferStates& states,
         const std::vector<aidl::android::hardware::camera::device::BufferRequest>& bufReqs,
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 4488067..f2e618f 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -47,6 +47,7 @@
 #include <utils/Timers.h>
 #include <cutils/properties.h>
 #include <camera/StringUtils.h>
+#include <com_android_internal_camera_flags.h>
 
 #include <android/hardware/camera/device/3.7/ICameraInjectionSession.h>
 #include <android/hardware/camera2/ICameraDeviceUser.h>
@@ -66,6 +67,7 @@
 using namespace android::hardware::camera;
 using namespace android::hardware::camera::device::V3_2;
 using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
+namespace flags = com::android::internal::camera::flags;
 
 namespace android {
 
@@ -307,7 +309,8 @@
         const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
         requestStreamBuffers_cb _hidl_cb) {
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mHalBufManagedStreamIds,
+        mOutputStreams, mSessionStatsBuilder,
         *this, *mInterface, *this};
     camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
     return hardware::Void();
@@ -316,7 +319,8 @@
 hardware::Return<void> HidlCamera3Device::returnStreamBuffers(
         const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, *mInterface};
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams,
+        mSessionStatsBuilder, *mInterface};
     camera3::returnStreamBuffers(states, buffers);
     return hardware::Void();
 }
@@ -362,7 +366,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -425,7 +429,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -473,7 +477,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -641,14 +645,14 @@
     // TODO: check if we need to lock before copying states
     //       though technically no other thread should be talking to Camera3Device at this point
     Camera3OfflineStates offlineStates(
-            mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
-            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
-            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
-            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
-            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
-            mZoomRatioMappers, mRotateAndCropMappers);
+            mTagMonitor, mVendorTagId, mUseHalBufManager, mHalBufManagedStreamIds,
+            mNeedFixupMonochromeTags, mUsePartialResult, mNumPartialResults,
+            mLastCompletedRegularFrameNumber, mLastCompletedReprocessFrameNumber,
+            mLastCompletedZslFrameNumber, mNextResultFrameNumber,
+            mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+            mNextShutterFrameNumber, mNextReprocessShutterFrameNumber,
+            mNextZslStillShutterFrameNumber, mDeviceInfo, mPhysicalDeviceInfoMap,
+            mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers);
 
     *session = new HidlCamera3OfflineSession(mId, inputStream, offlineStreamSet,
             std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
@@ -716,7 +720,8 @@
                 bool overrideToPortrait,
                 bool supportSettingsOverride) {
         return new HidlRequestThread(parent, statusTracker, interface, sessionParamKeys,
-                useHalBufManager, supportCameraMute, overrideToPortrait, supportSettingsOverride);
+                useHalBufManager, supportCameraMute, overrideToPortrait,
+                supportSettingsOverride);
 };
 
 sp<Camera3Device::Camera3DeviceInjectionMethods>
@@ -909,6 +914,7 @@
     requestedConfiguration3_2.streams.resize(config->num_streams);
     requestedConfiguration3_4.streams.resize(config->num_streams);
     requestedConfiguration3_7.streams.resize(config->num_streams);
+    mHalBufManagedStreamIds.clear();
     for (size_t i = 0; i < config->num_streams; i++) {
         device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
         device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
@@ -922,6 +928,9 @@
         switch (src->stream_type) {
             case CAMERA_STREAM_OUTPUT:
                 streamType = StreamType::OUTPUT;
+                if (flags::session_hal_buf_manager() && mUseHalBufManager) {
+                    mHalBufManagedStreamIds.insert(streamId);
+                }
                 break;
             case CAMERA_STREAM_INPUT:
                 streamType = StreamType::INPUT;
@@ -931,6 +940,7 @@
                         __FUNCTION__, streamId, config->streams[i]->stream_type);
                 return BAD_VALUE;
         }
+
         dst3_2.id = streamId;
         dst3_2.streamType = streamType;
         dst3_2.width = src->width;
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
index e328ef6..aa4b762 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -103,7 +103,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -145,7 +145,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -182,7 +182,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -207,7 +207,8 @@
     }
 
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mHalBufManagedStreamIds,
+        mOutputStreams, mSessionStatsBuilder,
         *this, mBufferRecords, *this};
     camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
     return hardware::Void();
@@ -224,7 +225,8 @@
     }
 
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, mBufferRecords};
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams, mSessionStatsBuilder,
+        mBufferRecords};
 
     camera3::returnStreamBuffers(states, buffers);
     return hardware::Void();
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
index 59fc1cd..d607d10 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
@@ -25,6 +25,7 @@
 #include <hidl/Utils.h>
 #include <android/hardware/camera/device/3.2/types.h>
 #include <android-base/properties.h>
+#include <utils/Utils.h>
 
 namespace android {
 namespace frameworks {
@@ -58,7 +59,7 @@
     const sp<hardware::camera2::ICameraDeviceUser> &deviceRemote)
   : mDeviceRemote(deviceRemote) {
     mInitSuccess = initDevice();
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 
 bool HidlCameraDeviceUser::initDevice() {
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 94bf653..1a5a6b9 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -25,6 +25,8 @@
 
 #include <hidl/HidlTransportSupport.h>
 
+#include <utils/Utils.h>
+
 namespace android {
 namespace frameworks {
 namespace cameraservice {
@@ -56,8 +58,8 @@
 }
 
 HidlCameraService::HidlCameraService(android::CameraService *cs) : mAidlICameraService(cs) {
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
-};
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
+}
 
 Return<void>
 HidlCameraService::getCameraCharacteristics(const hidl_string& cameraId,
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index a53d26d..939126c 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -372,28 +372,22 @@
 };
 
 /**
- * Simple test version of the interaction proxy, to use to inject onRegistered calls to the
+ * Simple test version of HidlServiceInteractionProxy, to use to inject onRegistered calls to the
  * CameraProviderManager
  */
-struct TestInteractionProxy : public CameraProviderManager::HidlServiceInteractionProxy,
-                              public CameraProviderManager::AidlServiceInteractionProxy {
+struct TestHidlInteractionProxy : public CameraProviderManager::HidlServiceInteractionProxy {
     sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
     sp<TestICameraProvider> mTestCameraProvider;
-    std::shared_ptr<TestAidlICameraProvider> mTestAidlCameraProvider;
 
-    TestInteractionProxy() {}
+    TestHidlInteractionProxy() {}
 
     void setProvider(sp<TestICameraProvider> provider) {
         mTestCameraProvider = provider;
     }
 
-    void setAidlProvider(std::shared_ptr<TestAidlICameraProvider> provider) {
-        mTestAidlCameraProvider = provider;
-    }
-
     std::vector<std::string> mLastRequestedServiceNames;
 
-    virtual ~TestInteractionProxy() {}
+    virtual ~TestHidlInteractionProxy() {}
 
     virtual bool registerForNotifications(
             [[maybe_unused]] const std::string &serviceName,
@@ -430,9 +424,47 @@
         hardware::hidl_vec<hardware::hidl_string> ret = {"test/0"};
         return ret;
     }
+};
+
+/**
+ * Simple test version of AidlServiceInteractionProxy, to use to inject onRegistered calls to the
+ * CameraProviderManager
+ */
+struct TestAidlInteractionProxy : public CameraProviderManager::AidlServiceInteractionProxy {
+    std::shared_ptr<TestAidlICameraProvider> mTestAidlCameraProvider;
+
+    TestAidlInteractionProxy() {}
+
+    void setProvider(std::shared_ptr<TestAidlICameraProvider> provider) {
+        mTestAidlCameraProvider = provider;
+    }
+
+    std::vector<std::string> mLastRequestedServiceNames;
+
+    virtual ~TestAidlInteractionProxy() {}
 
     virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-    getAidlService(const std::string&) {
+            getService(const std::string& serviceName) override {
+        if (!flags::delay_lazy_hal_instantiation()) {
+            return mTestAidlCameraProvider;
+        }
+
+        // If no provider has been given, fail; in reality, getService would
+        // block for HALs that don't start correctly, so we should never use
+        // getService when we don't have a valid HAL running
+        if (mTestAidlCameraProvider == nullptr) {
+            ADD_FAILURE() << __FUNCTION__ << " called with no valid provider;"
+                          << " would block indefinitely";
+            // Real getService would block, but that's bad in unit tests. So
+            // just record an error and return nullptr
+            return nullptr;
+        }
+        mLastRequestedServiceNames.push_back(serviceName);
+        return mTestAidlCameraProvider;
+    }
+
+    virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+    tryGetService(const std::string&) override {
         return mTestAidlCameraProvider;
     }
 };
@@ -462,7 +494,7 @@
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
 
     android::hardware::hidl_vec<uint8_t> chars;
     CameraMetadata meta;
@@ -510,7 +542,7 @@
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
     serviceProxy.setProvider(provider);
@@ -560,7 +592,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
 
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
@@ -696,7 +728,7 @@
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
     serviceProxy.setProvider(provider);
@@ -730,7 +762,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
 
@@ -779,7 +811,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
 
@@ -821,7 +853,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
 
     android::hardware::hidl_vec<uint8_t> chars;
     CameraMetadata meta;
@@ -857,9 +889,11 @@
                 REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(vd_flags, virtual_camera_service_discovery))) {
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestAidlInteractionProxy aidlServiceProxy;
+    TestHidlInteractionProxy hidlServiceProxy;
 
-    status_t res = providerManager->initialize(statusListener, &serviceProxy, &serviceProxy);
+    status_t res = providerManager->initialize(statusListener,
+                                               &hidlServiceProxy, &aidlServiceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
 
     std::vector<std::string> cameraList = {"device@1.1/virtual/123"};
@@ -868,7 +902,7 @@
             ndk::SharedRefBase::make<TestAidlICameraProvider>(cameraList);
     ndk::SpAIBinder spBinder = aidlProvider->asBinder();
     AIBinder* aiBinder = spBinder.get();
-    serviceProxy.setAidlProvider(aidlProvider);
+    aidlServiceProxy.setProvider(aidlProvider);
     providerManager->onServiceRegistration(
             String16("android.hardware.camera.provider.ICameraProvider/virtual/0"),
             AIBinder_toPlatformBinder(aiBinder));
@@ -883,15 +917,17 @@
                 REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(vd_flags, virtual_camera_service_discovery))) {
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestAidlInteractionProxy aidlServiceProxy;
+    TestHidlInteractionProxy hidlServiceProxy;
 
     std::vector<std::string> cameraList = {"device@1.1/virtual/123"};
 
     std::shared_ptr<TestAidlICameraProvider> aidlProvider =
             ndk::SharedRefBase::make<TestAidlICameraProvider>(cameraList);
-    serviceProxy.setAidlProvider(aidlProvider);
+    aidlServiceProxy.setProvider(aidlProvider);
 
-    status_t res = providerManager->initialize(statusListener, &serviceProxy, &serviceProxy);
+    status_t res = providerManager->initialize(statusListener,
+                                               &hidlServiceProxy, &aidlServiceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
 
     std::unordered_map<std::string, std::set<std::string>> unavailableDeviceIds;
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index aef6531..11ef9b7 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -1133,7 +1133,7 @@
 }
 
 void filterParameters(const CameraMetadata& src, const CameraMetadata& deviceInfo,
-        int vendorTagId, CameraMetadata& dst) {
+        metadata_vendor_id_t vendorTagId, CameraMetadata& dst) {
     const CameraMetadata params(src);
     camera_metadata_ro_entry_t availableSessionKeys = deviceInfo.find(
             ANDROID_REQUEST_AVAILABLE_SESSION_KEYS);
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 29e3eca..0545cea 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -177,7 +177,11 @@
         aidl::android::hardware::camera::device::RequestTemplate* tempId /*out*/);
 
 void filterParameters(const CameraMetadata& src, const CameraMetadata& deviceInfo,
-        int vendorTagId, CameraMetadata& dst);
+        metadata_vendor_id_t vendorTagId, CameraMetadata& dst);
+
+template <typename T> bool contains(const std::set<T>& container, const T& value) {
+    return container.find(value) != container.end();
+}
 
 constexpr int32_t MAX_SURFACES_PER_STREAM = 4;
 
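
The contains() helper above is what the per-stream HAL-buffer-manager checks lean on throughout the output utils. A trivial, self-contained usage sketch (the set contents are made up):

    #include <cstdint>
    #include <set>

    // Mirrors the helper declared in SessionConfigurationUtils.h.
    template <typename T> bool contains(const std::set<T>& container, const T& value) {
        return container.find(value) != container.end();
    }

    int main() {
        std::set<int32_t> halBufManagedStreamIds = {2, 5};
        int32_t streamId = 5;
        bool halManaged = contains(halBufManagedStreamIds, streamId);           // true
        bool frameworkManaged = !contains(halBufManagedStreamIds, int32_t{7});  // true
        return (halManaged && frameworkManaged) ? 0 : 1;
    }
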
diff --git a/services/camera/libcameraservice/utils/Utils.cpp b/services/camera/libcameraservice/utils/Utils.cpp
new file mode 100644
index 0000000..c8f5e86
--- /dev/null
+++ b/services/camera/libcameraservice/utils/Utils.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Utils.h"
+#include <android-base/properties.h>
+#include <com_android_internal_camera_flags.h>
+
+
+namespace android {
+
+using namespace com::android::internal::camera::flags;
+
+constexpr const char *LEGACY_VNDK_VERSION_PROP = "ro.vndk.version";
+constexpr const char *BOARD_API_LEVEL_PROP = "ro.board.api_level";
+constexpr int MAX_VENDOR_API_LEVEL = 1000000;
+constexpr int FIRST_VNDK_VERSION = 202404;
+
+int getVNDKVersionFromProp(int defaultVersion) {
+    if (!com_android_internal_camera_flags_use_ro_board_api_level_for_vndk_version()) {
+        return base::GetIntProperty(LEGACY_VNDK_VERSION_PROP, defaultVersion);
+    }
+
+    int vndkVersion = base::GetIntProperty(BOARD_API_LEVEL_PROP, MAX_VENDOR_API_LEVEL);
+
+    if (vndkVersion == MAX_VENDOR_API_LEVEL) {
+        // Couldn't find property
+        return defaultVersion;
+    }
+
+    if (vndkVersion < __ANDROID_API_V__) {
+        // VNDK versions below V return the corresponding SDK version.
+        return vndkVersion;
+    }
+
+    // VNDK versions for Android V and above use the format YYYYMM, starting with 202404, and are
+    // bumped once a year. So V is 202404 and the next one would be 202504.
+    // This is the same assumption as that made in system/core/init/property_service.cpp.
+    vndkVersion = (vndkVersion - FIRST_VNDK_VERSION) / 100;
+    return __ANDROID_API_V__ + vndkVersion;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/utils/Utils.h b/services/camera/libcameraservice/utils/Utils.h
new file mode 100644
index 0000000..f8a107d
--- /dev/null
+++ b/services/camera/libcameraservice/utils/Utils.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_UTILS_H
+#define ANDROID_SERVERS_CAMERA_UTILS_H
+
+namespace android {
+
+/**
+ * As of Android V, ro.board.api_level returns the year and month of release (ex. 202404)
+ * instead of release SDK version. This function maps year/month format back to release
+ * SDK version.
+ *
+ * Returns defaultVersion if the property is not found.
+ */
+int getVNDKVersionFromProp(int defaultVersion);
+
+} // namespace android
+
+#endif //ANDROID_SERVERS_CAMERA_UTILS_H
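A worked illustration of the mapping implemented by getVNDKVersionFromProp (assuming the aconfig flag above is enabled and that __ANDROID_API_V__ equals 35, the Android V SDK level):

// ro.board.api_level unset    -> returns defaultVersion
// ro.board.api_level = 34     -> below __ANDROID_API_V__, returned as-is (34)
// ro.board.api_level = 202404 -> (202404 - 202404) / 100 = 0 -> 35 (__ANDROID_API_V__)
// ro.board.api_level = 202504 -> (202504 - 202404) / 100 = 1 -> 36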
diff --git a/services/camera/virtualcamera/Android.bp b/services/camera/virtualcamera/Android.bp
index cb4e10f..94007cd 100644
--- a/services/camera/virtualcamera/Android.bp
+++ b/services/camera/virtualcamera/Android.bp
@@ -54,7 +54,7 @@
         "util/EglProgram.cc",
         "util/EglSurfaceTexture.cc",
         "util/EglUtil.cc",
-        "util/Permissions.cc"
+        "util/Permissions.cc",
     ],
     defaults: [
         "libvirtualcamera_defaults",
diff --git a/services/camera/virtualcamera/TEST_MAPPING b/services/camera/virtualcamera/TEST_MAPPING
index 66c5e52..25fca73 100644
--- a/services/camera/virtualcamera/TEST_MAPPING
+++ b/services/camera/virtualcamera/TEST_MAPPING
@@ -2,9 +2,7 @@
   "presubmit" : [
     {
       "name": "virtual_camera_tests"
-    }
-  ],
-  "postsubmit": [
+    },
     {
       "name": "CtsVirtualDevicesCameraTestCases",
       "options": [
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.cc b/services/camera/virtualcamera/VirtualCameraDevice.cc
index 84f721b..947b355 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.cc
+++ b/services/camera/virtualcamera/VirtualCameraDevice.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -23,11 +23,14 @@
 #include <chrono>
 #include <cstdint>
 #include <iterator>
+#include <numeric>
 #include <optional>
 #include <string>
+#include <vector>
 
 #include "VirtualCameraSession.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
@@ -44,7 +47,10 @@
 
 using ::aidl::android::companion::virtualcamera::Format;
 using ::aidl::android::companion::virtualcamera::IVirtualCameraCallback;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraResourceCost;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
@@ -64,26 +70,46 @@
 // Prefix of camera name - "device@1.1/virtual/{numerical_id}"
 const char* kDevicePathPrefix = "device@1.1/virtual/";
 
-constexpr std::chrono::nanoseconds kMinFrameDuration30Fps = 1s / 30;
 constexpr int32_t kMaxJpegSize = 3 * 1024 * 1024 /*3MiB*/;
 
+constexpr int32_t kMinFps = 15;
+
+constexpr std::chrono::nanoseconds kMaxFrameDuration =
+    std::chrono::duration_cast<std::chrono::nanoseconds>(1e9ns / kMinFps);
+
+constexpr uint8_t kPipelineMaxDepth = 2;
+
 constexpr MetadataBuilder::ControlRegion kDefaultEmptyControlRegion{};
 
-struct Resolution {
-  Resolution(const int w, const int h) : width(w), height(h) {
+const std::array<PixelFormat, 3> kOutputFormats{
+    PixelFormat::IMPLEMENTATION_DEFINED, PixelFormat::YCBCR_420_888,
+    PixelFormat::BLOB};
+
+bool isSupportedOutputFormat(const PixelFormat pixelFormat) {
+  return std::find(kOutputFormats.begin(), kOutputFormats.end(), pixelFormat) !=
+         kOutputFormats.end();
+}
+
+std::vector<MetadataBuilder::FpsRange> fpsRangesForInputConfig(
+    const std::vector<SupportedStreamConfiguration>& configs) {
+  std::set<MetadataBuilder::FpsRange> availableRanges;
+
+  for (const SupportedStreamConfiguration& config : configs) {
+    availableRanges.insert({.minFps = kMinFps, .maxFps = config.maxFps});
+    availableRanges.insert({.minFps = config.maxFps, .maxFps = config.maxFps});
   }
 
-  bool operator<(const Resolution& other) const {
-    return width * height < other.width * other.height;
+  if (std::any_of(configs.begin(), configs.end(),
+                  [](const SupportedStreamConfiguration& config) {
+                    return config.maxFps >= 30;
+                  })) {
+    availableRanges.insert({.minFps = kMinFps, .maxFps = 30});
+    availableRanges.insert({.minFps = 30, .maxFps = 30});
   }
 
-  bool operator==(const Resolution& other) const {
-    return width == other.width && height == other.height;
-  }
-
-  const int width;
-  const int height;
-};
+  return std::vector<MetadataBuilder::FpsRange>(availableRanges.begin(),
+                                                availableRanges.end());
+}
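A hedged trace of fpsRangesForInputConfig() for a hypothetical input (the configurations below are illustrative only):

// configs = { 640x480 @ maxFps 30, 1280x720 @ maxFps 60 }, kMinFps = 15
//   per-config inserts: {15, 30}, {30, 30}, {15, 60}, {60, 60}
//   at least one config reaches 30 fps, so {15, 30} and {30, 30} are inserted again (no-ops in the set)
//   result: { {15, 30}, {15, 60}, {30, 30}, {60, 60} }, ordered by FpsRange comparison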
 
 std::optional<Resolution> getMaxResolution(
     const std::vector<SupportedStreamConfiguration>& configs) {
@@ -103,24 +129,34 @@
   return Resolution(itMax->width, itMax->height);
 }
 
-std::set<Resolution> getUniqueResolutions(
+// Returns a map from each unique resolution to the maximum maxFps among all
+// streams with that resolution.
+std::map<Resolution, int> getResolutionToMaxFpsMap(
     const std::vector<SupportedStreamConfiguration>& configs) {
-  std::set<Resolution> uniqueResolutions;
-  std::transform(configs.begin(), configs.end(),
-                 std::inserter(uniqueResolutions, uniqueResolutions.begin()),
-                 [](const SupportedStreamConfiguration& config) {
-                   return Resolution(config.width, config.height);
-                 });
-  return uniqueResolutions;
+  std::map<Resolution, int> resolutionToMaxFpsMap;
+
+  for (const SupportedStreamConfiguration& config : configs) {
+    Resolution resolution(config.width, config.height);
+    if (resolutionToMaxFpsMap.find(resolution) == resolutionToMaxFpsMap.end()) {
+      resolutionToMaxFpsMap[resolution] = config.maxFps;
+    } else {
+      int currentMaxFps = resolutionToMaxFpsMap[resolution];
+      resolutionToMaxFpsMap[resolution] = std::max(currentMaxFps, config.maxFps);
+    }
+  }
+
+  return resolutionToMaxFpsMap;
 }
 
 // TODO(b/301023410) - Populate camera characteristics according to camera configuration.
 std::optional<CameraMetadata> initCameraCharacteristics(
-    const std::vector<SupportedStreamConfiguration>& supportedInputConfig) {
+    const std::vector<SupportedStreamConfiguration>& supportedInputConfig,
+    const SensorOrientation sensorOrientation, const LensFacing lensFacing) {
   if (!std::all_of(supportedInputConfig.begin(), supportedInputConfig.end(),
                    [](const SupportedStreamConfiguration& config) {
                      return isFormatSupportedForInput(
-                         config.width, config.height, config.pixelFormat);
+                         config.width, config.height, config.pixelFormat,
+                         config.maxFps);
                    })) {
     ALOGE("%s: input configuration contains unsupported format", __func__);
     return std::nullopt;
@@ -131,26 +167,68 @@
           .setSupportedHardwareLevel(
               ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL)
           .setFlashAvailable(false)
-          .setLensFacing(ANDROID_LENS_FACING_EXTERNAL)
-          .setSensorOrientation(0)
+          .setLensFacing(
+              static_cast<camera_metadata_enum_android_lens_facing>(lensFacing))
+          .setAvailableFocalLengths({VirtualCameraDevice::kFocalLength})
+          .setSensorOrientation(static_cast<int32_t>(sensorOrientation))
+          .setSensorReadoutTimestamp(
+              ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED)
+          .setSensorTimestampSource(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN)
+          .setSensorPhysicalSize(36.0, 24.0)
           .setAvailableFaceDetectModes({ANDROID_STATISTICS_FACE_DETECT_MODE_OFF})
+          .setAvailableTestPatternModes({ANDROID_SENSOR_TEST_PATTERN_MODE_OFF})
           .setAvailableMaxDigitalZoom(1.0)
           .setControlAvailableModes({ANDROID_CONTROL_MODE_AUTO})
           .setControlAfAvailableModes({ANDROID_CONTROL_AF_MODE_OFF})
-          .setControlAeAvailableFpsRange(10, 30)
+          .setControlAvailableSceneModes({ANDROID_CONTROL_SCENE_MODE_DISABLED})
+          .setControlAvailableEffects({ANDROID_CONTROL_EFFECT_MODE_OFF})
+          .setControlAvailableVideoStabilizationModes(
+              {ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF})
+          .setControlAeAvailableModes({ANDROID_CONTROL_AE_MODE_ON})
+          .setControlAeAvailableAntibandingModes(
+              {ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO})
+          .setControlAeAvailableFpsRanges(
+              fpsRangesForInputConfig(supportedInputConfig))
           .setControlMaxRegions(0, 0, 0)
           .setControlAfRegions({kDefaultEmptyControlRegion})
           .setControlAeRegions({kDefaultEmptyControlRegion})
           .setControlAwbRegions({kDefaultEmptyControlRegion})
-          .setControlAeCompensationRange(0, 1)
+          .setControlAeCompensationRange(0, 0)
           .setControlAeCompensationStep(camera_metadata_rational_t{0, 1})
+          .setControlAwbLockAvailable(false)
+          .setControlAeLockAvailable(false)
+          .setControlAvailableAwbModes({ANDROID_CONTROL_AWB_MODE_AUTO})
           .setControlZoomRatioRange(/*min=*/1.0, /*max=*/1.0)
+          // TODO(b/301023410) Add JPEG Exif + thumbnail support.
+          .setJpegAvailableThumbnailSizes({Resolution(0, 0)})
           .setMaxJpegSize(kMaxJpegSize)
-          .setAvailableRequestKeys({ANDROID_CONTROL_AF_MODE})
-          .setAvailableResultKeys({ANDROID_CONTROL_AF_MODE})
+          .setMaxFrameDuration(kMaxFrameDuration)
+          .setMaxNumberOutputStreams(
+              VirtualCameraDevice::kMaxNumberOfRawStreams,
+              VirtualCameraDevice::kMaxNumberOfProcessedStreams,
+              VirtualCameraDevice::kMaxNumberOfStallStreams)
+          .setPipelineMaxDepth(kPipelineMaxDepth)
+          .setSyncMaxLatency(ANDROID_SYNC_MAX_LATENCY_UNKNOWN)
+          .setAvailableRequestKeys(
+              {ANDROID_CONTROL_CAPTURE_INTENT, ANDROID_CONTROL_AE_MODE,
+               ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+               ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+               ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+               ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, ANDROID_CONTROL_AF_TRIGGER,
+               ANDROID_CONTROL_AF_MODE, ANDROID_CONTROL_AWB_MODE,
+               ANDROID_SCALER_CROP_REGION, ANDROID_CONTROL_EFFECT_MODE,
+               ANDROID_CONTROL_MODE, ANDROID_CONTROL_SCENE_MODE,
+               ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+               ANDROID_CONTROL_ZOOM_RATIO, ANDROID_STATISTICS_FACE_DETECT_MODE,
+               ANDROID_FLASH_MODE})
+          .setAvailableResultKeys(
+              {ANDROID_CONTROL_AE_MODE, ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+               ANDROID_CONTROL_AF_MODE, ANDROID_CONTROL_AWB_MODE,
+               ANDROID_CONTROL_EFFECT_MODE, ANDROID_CONTROL_MODE,
+               ANDROID_FLASH_MODE, ANDROID_FLASH_STATE,
+               ANDROID_SENSOR_TIMESTAMP, ANDROID_LENS_FOCAL_LENGTH})
           .setAvailableCapabilities(
-              {ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE})
-          .setAvailableCharacteristicKeys();
+              {ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE});
 
   // Active array size must correspond to largest supported input resolution.
   std::optional<Resolution> maxResolution =
@@ -160,56 +238,36 @@
   }
   builder.setSensorActiveArraySize(0, 0, maxResolution->width,
                                    maxResolution->height);
+  builder.setSensorPixelArraySize(maxResolution->width, maxResolution->height);
 
   std::vector<MetadataBuilder::StreamConfiguration> outputConfigurations;
 
   // TODO(b/301023410) Add also all "standard" resolutions we can rescale the
   // streams to (all standard resolutions with same aspect ratio).
 
-  // Add IMPLEMENTATION_DEFINED format for all supported input resolutions.
-  std::set<Resolution> uniqueResolutions =
-      getUniqueResolutions(supportedInputConfig);
-  std::transform(
-      uniqueResolutions.begin(), uniqueResolutions.end(),
-      std::back_inserter(outputConfigurations),
-      [](const Resolution& resolution) {
-        return MetadataBuilder::StreamConfiguration{
-            .width = resolution.width,
-            .height = resolution.height,
-            .format = ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
-            .minFrameDuration = kMinFrameDuration30Fps,
-            .minStallDuration = 0s};
-      });
+  std::map<Resolution, int> resolutionToMaxFpsMap =
+      getResolutionToMaxFpsMap(supportedInputConfig);
 
-  // Add all supported configuration with explicit pixel format.
-  std::transform(supportedInputConfig.begin(), supportedInputConfig.end(),
-                 std::back_inserter(outputConfigurations),
-                 [](const SupportedStreamConfiguration& config) {
-                   return MetadataBuilder::StreamConfiguration{
-                       .width = config.width,
-                       .height = config.height,
-                       .format = static_cast<int>(config.pixelFormat),
-                       .minFrameDuration = kMinFrameDuration30Fps,
-                       .minStallDuration = 0s};
-                 });
-
-  // TODO(b/301023410) We currently don't support rescaling for still capture,
-  // so only announce BLOB support for formats exactly matching the input.
-  std::transform(uniqueResolutions.begin(), uniqueResolutions.end(),
-                 std::back_inserter(outputConfigurations),
-                 [](const Resolution& resolution) {
-                   return MetadataBuilder::StreamConfiguration{
-                       .width = resolution.width,
-                       .height = resolution.height,
-                       .format = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
-                       .minFrameDuration = kMinFrameDuration30Fps,
-                       .minStallDuration = 0s};
-                 });
+  // Add configurations for all unique input resolutions and output formats.
+  for (const PixelFormat format : kOutputFormats) {
+    std::transform(
+        resolutionToMaxFpsMap.begin(), resolutionToMaxFpsMap.end(),
+        std::back_inserter(outputConfigurations), [format](const auto& entry) {
+          Resolution resolution = entry.first;
+          int maxFps = entry.second;
+          return MetadataBuilder::StreamConfiguration{
+              .width = resolution.width,
+              .height = resolution.height,
+              .format = static_cast<int32_t>(format),
+              .minFrameDuration = std::chrono::nanoseconds(1s) / maxFps,
+              .minStallDuration = 0s};
+        });
+  }
 
   ALOGV("Adding %zu output configurations", outputConfigurations.size());
   builder.setAvailableOutputStreamConfigurations(outputConfigurations);
 
-  auto metadata = builder.build();
+  auto metadata = builder.setAvailableCharacteristicKeys().build();
   if (metadata == nullptr) {
     ALOGE("Failed to build metadata!");
     return CameraMetadata();
@@ -221,14 +279,13 @@
 }  // namespace
 
 VirtualCameraDevice::VirtualCameraDevice(
-    const uint32_t cameraId,
-    const std::vector<SupportedStreamConfiguration>& supportedInputConfig,
-    std::shared_ptr<IVirtualCameraCallback> virtualCameraClientCallback)
+    const uint32_t cameraId, const VirtualCameraConfiguration& configuration)
     : mCameraId(cameraId),
-      mVirtualCameraClientCallback(virtualCameraClientCallback),
-      mSupportedInputConfigurations(supportedInputConfig) {
-  std::optional<CameraMetadata> metadata =
-      initCameraCharacteristics(mSupportedInputConfigurations);
+      mVirtualCameraClientCallback(configuration.virtualCameraCallback),
+      mSupportedInputConfigurations(configuration.supportedStreamConfigs) {
+  std::optional<CameraMetadata> metadata = initCameraCharacteristics(
+      mSupportedInputConfigurations, configuration.sensorOrientation,
+      configuration.lensFacing);
   if (metadata.has_value()) {
     mCameraCharacteristics = *metadata;
   } else {
@@ -286,6 +343,13 @@
 
 bool VirtualCameraDevice::isStreamCombinationSupported(
     const StreamConfiguration& streamConfiguration) const {
+  if (streamConfiguration.streams.empty()) {
+    ALOGE("%s: Querying empty configuration", __func__);
+    return false;
+  }
+
+  int numberOfProcessedStreams = 0;
+  int numberOfStallStreams = 0;
   for (const Stream& stream : streamConfiguration.streams) {
     ALOGV("%s: Configuration queried: %s", __func__, stream.toString().c_str());
 
@@ -294,15 +358,18 @@
       return false;
     }
 
-    // TODO(b/301023410) remove hardcoded format checks, verify against configuration.
     if (stream.rotation != StreamRotation::ROTATION_0 ||
-        (stream.format != PixelFormat::IMPLEMENTATION_DEFINED &&
-         stream.format != PixelFormat::YCBCR_420_888 &&
-         stream.format != PixelFormat::BLOB)) {
+        !isSupportedOutputFormat(stream.format)) {
       ALOGV("Unsupported output stream type");
       return false;
     }
 
+    if (stream.format == PixelFormat::BLOB) {
+      numberOfStallStreams++;
+    } else {
+      numberOfProcessedStreams++;
+    }
+
     auto matchesSupportedInputConfig =
         [&stream](const SupportedStreamConfiguration& config) {
           return stream.width == config.width && stream.height == config.height;
@@ -314,6 +381,19 @@
       return false;
     }
   }
+
+  if (numberOfProcessedStreams > kMaxNumberOfProcessedStreams) {
+    ALOGE("%s: %d processed streams exceeds the supported maximum of %d",
+          __func__, numberOfProcessedStreams, kMaxNumberOfProcessedStreams);
+    return false;
+  }
+
+  if (numberOfStallStreams > kMaxNumberOfStallStreams) {
+    ALOGE("%s: %d stall streams exceeds the supported maximum of %d", __func__,
+          numberOfStallStreams, kMaxNumberOfStallStreams);
+    return false;
+  }
+
   return true;
 }
 
@@ -368,6 +448,24 @@
   return std::string(kDevicePathPrefix) + std::to_string(mCameraId);
 }
 
+const std::vector<SupportedStreamConfiguration>&
+VirtualCameraDevice::getInputConfigs() const {
+  return mSupportedInputConfigurations;
+}
+
+Resolution VirtualCameraDevice::getMaxInputResolution() const {
+  std::optional<Resolution> maxResolution =
+      getMaxResolution(mSupportedInputConfigurations);
+  if (!maxResolution.has_value()) {
+    ALOGE(
+        "%s: Cannot determine sensor size for virtual camera - input "
+        "configurations empty?",
+        __func__);
+    return Resolution(0, 0);
+  }
+  return maxResolution.value();
+}
+
 std::shared_ptr<VirtualCameraDevice> VirtualCameraDevice::sharedFromThis() {
   // SharedRefBase which BnCameraDevice inherits from breaks
   // std::enable_shared_from_this. This is recommended replacement for
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.h b/services/camera/virtualcamera/VirtualCameraDevice.h
index 402de6c..720f02e 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.h
+++ b/services/camera/virtualcamera/VirtualCameraDevice.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,7 +22,9 @@
 
 #include "aidl/android/companion/virtualcamera/IVirtualCameraCallback.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/device/BnCameraDevice.h"
+#include "util/Util.h"
 
 namespace android {
 namespace companion {
@@ -35,12 +37,8 @@
  public:
   explicit VirtualCameraDevice(
       uint32_t cameraId,
-      const std::vector<
-          aidl::android::companion::virtualcamera::SupportedStreamConfiguration>&
-          supportedInputConfig,
-      std::shared_ptr<
-          ::aidl::android::companion::virtualcamera::IVirtualCameraCallback>
-          virtualCameraClientCallback = nullptr);
+      const aidl::android::companion::virtualcamera::VirtualCameraConfiguration&
+          configuration);
 
   virtual ~VirtualCameraDevice() override = default;
 
@@ -97,6 +95,29 @@
 
   uint32_t getCameraId() const { return mCameraId; }
 
+  const std::vector<
+      aidl::android::companion::virtualcamera::SupportedStreamConfiguration>&
+  getInputConfigs() const;
+
+  // Returns the largest supported input resolution.
+  Resolution getMaxInputResolution() const;
+
+  // Maximum number of RAW streams - virtual camera doesn't support RAW streams.
+  static const int32_t kMaxNumberOfRawStreams = 0;
+
+  // Maximum number of non-JPEG streams configured concurrently in a single
+  // session. This should be at least 3 and can be increased at the potential
+  // cost of more CPU/GPU load if there are many concurrent streams.
+  static const int32_t kMaxNumberOfProcessedStreams = 3;
+
+  // Maximum number of stalling streams (for the virtual camera, only JPEG is
+  // a stalling format for now). Can be increased at the potential cost of more
+  // GPU/CPU load.
+  static const int32_t kMaxNumberOfStallStreams = 1;
+
+  // Focal length for full frame sensor.
+  constexpr static const float kFocalLength = 43.0;
+
  private:
   std::shared_ptr<VirtualCameraDevice> sharedFromThis();
 
diff --git a/services/camera/virtualcamera/VirtualCameraProvider.cc b/services/camera/virtualcamera/VirtualCameraProvider.cc
index 25a43d6..e4a68f5 100644
--- a/services/camera/virtualcamera/VirtualCameraProvider.cc
+++ b/services/camera/virtualcamera/VirtualCameraProvider.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -33,8 +33,7 @@
 namespace companion {
 namespace virtualcamera {
 
-using ::aidl::android::companion::virtualcamera::IVirtualCameraCallback;
-using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraDeviceStatus;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::common::VendorTagSection;
@@ -155,10 +154,9 @@
 }
 
 std::shared_ptr<VirtualCameraDevice> VirtualCameraProvider::createCamera(
-    const std::vector<SupportedStreamConfiguration>& supportedInputConfig,
-    std::shared_ptr<IVirtualCameraCallback> virtualCameraClientCallback) {
-  auto camera = ndk::SharedRefBase::make<VirtualCameraDevice>(
-      sNextId++, supportedInputConfig, virtualCameraClientCallback);
+    const VirtualCameraConfiguration& configuration) {
+  auto camera =
+      ndk::SharedRefBase::make<VirtualCameraDevice>(sNextId++, configuration);
   std::shared_ptr<ICameraProviderCallback> callback;
   {
     const std::lock_guard<std::mutex> lock(mLock);
diff --git a/services/camera/virtualcamera/VirtualCameraProvider.h b/services/camera/virtualcamera/VirtualCameraProvider.h
index d41a005..11d3123 100644
--- a/services/camera/virtualcamera/VirtualCameraProvider.h
+++ b/services/camera/virtualcamera/VirtualCameraProvider.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -74,14 +74,9 @@
 
   // Create new virtual camera devices
   // Returns nullptr if creation was not successful.
-  //
-  // TODO(b/301023410) - Add camera configuration.
   std::shared_ptr<VirtualCameraDevice> createCamera(
-      const std::vector<
-          aidl::android::companion::virtualcamera::SupportedStreamConfiguration>&
-          supportedInputConfig,
-      std::shared_ptr<aidl::android::companion::virtualcamera::IVirtualCameraCallback>
-          virtualCameraClientCallback = nullptr);
+      const aidl::android::companion::virtualcamera::VirtualCameraConfiguration&
+          configuration);
 
   std::shared_ptr<VirtualCameraDevice> getCamera(const std::string& name);
 
@@ -105,4 +100,4 @@
 }  // namespace companion
 }  // namespace android
 
-#endif  // ANDROID_SERVICES_VIRTUAL_CAMERA_VIRTUALCAMERAPROVIDER_H
+#endif  // ANDROID_COMPANION_VIRTUALCAMERA_VIRTUALCAMERAPROVIDER_H
diff --git a/services/camera/virtualcamera/VirtualCameraRenderThread.cc b/services/camera/virtualcamera/VirtualCameraRenderThread.cc
index 79c91ef..7bbc6ea 100644
--- a/services/camera/virtualcamera/VirtualCameraRenderThread.cc
+++ b/services/camera/virtualcamera/VirtualCameraRenderThread.cc
@@ -18,7 +18,6 @@
 #include "VirtualCameraRenderThread.h"
 
 #include <chrono>
-#include <cstddef>
 #include <cstdint>
 #include <future>
 #include <memory>
@@ -26,6 +25,7 @@
 #include <thread>
 
 #include "GLES/gl.h"
+#include "VirtualCameraDevice.h"
 #include "VirtualCameraSessionContext.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/BufferStatus.h"
@@ -71,10 +71,31 @@
 
 static constexpr std::chrono::milliseconds kAcquireFenceTimeout = 500ms;
 
+// See REQUEST_PIPELINE_DEPTH in CaptureResult.java.
+// This roughly corresponds to frame latency; we set it to the
+// documented minimum of 2.
+static constexpr uint8_t kPipelineDepth = 2;
+
 CameraMetadata createCaptureResultMetadata(
-    const std::chrono::nanoseconds timestamp) {
+    const std::chrono::nanoseconds timestamp,
+    const Resolution reportedSensorSize) {
   std::unique_ptr<CameraMetadata> metadata =
-      MetadataBuilder().setSensorTimestamp(timestamp).build();
+      MetadataBuilder()
+          .setControlAeMode(ANDROID_CONTROL_AE_MODE_ON)
+          .setControlAePrecaptureTrigger(
+              ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE)
+          .setControlAfMode(ANDROID_CONTROL_AF_MODE_OFF)
+          .setControlAwbMode(ANDROID_CONTROL_AWB_MODE_AUTO)
+          .setControlEffectMode(ANDROID_CONTROL_EFFECT_MODE_OFF)
+          .setControlMode(ANDROID_CONTROL_MODE_AUTO)
+          .setCropRegion(0, 0, reportedSensorSize.width,
+                         reportedSensorSize.height)
+          .setFaceDetectMode(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF)
+          .setFlashState(ANDROID_FLASH_STATE_UNAVAILABLE)
+          .setFocalLength(VirtualCameraDevice::kFocalLength)
+          .setPipelineDepth(kPipelineDepth)
+          .setSensorTimestamp(timestamp)
+          .build();
   if (metadata == nullptr) {
     ALOGE("%s: Failed to build capture result metadata", __func__);
     return CameraMetadata();
@@ -170,12 +191,12 @@
 }
 
 VirtualCameraRenderThread::VirtualCameraRenderThread(
-    VirtualCameraSessionContext& sessionContext, const int mWidth,
-    const int mHeight,
+    VirtualCameraSessionContext& sessionContext,
+    const Resolution inputSurfaceSize, const Resolution reportedSensorSize,
     std::shared_ptr<ICameraDeviceCallback> cameraDeviceCallback, bool testMode)
     : mCameraDeviceCallback(cameraDeviceCallback),
-      mInputSurfaceWidth(mWidth),
-      mInputSurfaceHeight(mHeight),
+      mInputSurfaceSize(inputSurfaceSize),
+      mReportedSensorSize(reportedSensorSize),
       mTestMode(testMode),
       mSessionContext(sessionContext) {
 }
@@ -263,8 +284,8 @@
       std::make_unique<EglTextureProgram>(EglTextureProgram::TextureFormat::YUV);
   mEglTextureRgbProgram = std::make_unique<EglTextureProgram>(
       EglTextureProgram::TextureFormat::RGBA);
-  mEglSurfaceTexture = std::make_unique<EglSurfaceTexture>(mInputSurfaceWidth,
-                                                           mInputSurfaceHeight);
+  mEglSurfaceTexture = std::make_unique<EglSurfaceTexture>(
+      mInputSurfaceSize.width, mInputSurfaceSize.height);
   mInputSurfacePromise.set_value(mEglSurfaceTexture->getSurface());
 
   while (std::unique_ptr<ProcessCaptureRequestTask> task = dequeueTask()) {
@@ -287,7 +308,8 @@
   captureResult.partialResult = 1;
   captureResult.inputBuffer.streamId = -1;
   captureResult.physicalCameraMetadata.resize(0);
-  captureResult.result = createCaptureResultMetadata(timestamp);
+  captureResult.result =
+      createCaptureResultMetadata(timestamp, mReportedSensorSize);
 
   const std::vector<CaptureRequestBuffer>& buffers = request.getBuffers();
   captureResult.outputBuffers.resize(buffers.size());
@@ -542,8 +564,12 @@
   } else {
     const bool renderSuccess =
         isYuvFormat(static_cast<PixelFormat>(textureBuffer->getPixelFormat()))
-            ? mEglTextureYuvProgram->draw(mEglSurfaceTexture->updateTexture())
-            : mEglTextureRgbProgram->draw(mEglSurfaceTexture->updateTexture());
+            ? mEglTextureYuvProgram->draw(
+                  mEglSurfaceTexture->getTextureId(),
+                  mEglSurfaceTexture->getTransformMatrix())
+            : mEglTextureRgbProgram->draw(
+                  mEglSurfaceTexture->getTextureId(),
+                  mEglSurfaceTexture->getTransformMatrix());
     if (!renderSuccess) {
       ALOGE("%s: Failed to render texture", __func__);
       return cameraStatus(Status::INTERNAL_ERROR);
diff --git a/services/camera/virtualcamera/VirtualCameraRenderThread.h b/services/camera/virtualcamera/VirtualCameraRenderThread.h
index b3aaed8..c8f61f4 100644
--- a/services/camera/virtualcamera/VirtualCameraRenderThread.h
+++ b/services/camera/virtualcamera/VirtualCameraRenderThread.h
@@ -22,6 +22,7 @@
 #include <memory>
 #include <thread>
 
+#include "VirtualCameraDevice.h"
 #include "VirtualCameraSessionContext.h"
 #include "aidl/android/hardware/camera/device/ICameraDeviceCallback.h"
 #include "android/binder_auto_utils.h"
@@ -77,14 +78,14 @@
   // Create VirtualCameraRenderThread instance:
   // * sessionContext - VirtualCameraSessionContext reference for shared access
   // to mapped buffers.
-  // * inputWidth - requested width of input surface ("virtual camera sensor")
-  // * inputHeight - requested height of input surface ("virtual camera sensor")
+  // * inputSurfaceSize - requested size of the input surface.
+  // * reportedSensorSize - reported static sensor size of the virtual camera.
   // * cameraDeviceCallback - callback for corresponding camera instance
   // * testMode - when set to true, test pattern is rendered to input surface
   // before each capture request is processed to simulate client input.
   VirtualCameraRenderThread(
-      VirtualCameraSessionContext& sessionContext, int inputWidth,
-      int inputHeight,
+      VirtualCameraSessionContext& sessionContext, Resolution inputSurfaceSize,
+      Resolution reportedSensorSize,
       std::shared_ptr<
           ::aidl::android::hardware::camera::device::ICameraDeviceCallback>
           cameraDeviceCallback,
@@ -149,8 +150,8 @@
       ::aidl::android::hardware::camera::device::ICameraDeviceCallback>
       mCameraDeviceCallback;
 
-  const int mInputSurfaceWidth;
-  const int mInputSurfaceHeight;
+  const Resolution mInputSurfaceSize;
+  const Resolution mReportedSensorSize;
   const int mTestMode;
 
   VirtualCameraSessionContext& mSessionContext;
diff --git a/services/camera/virtualcamera/VirtualCameraService.cc b/services/camera/virtualcamera/VirtualCameraService.cc
index 370a5a8..1144997 100644
--- a/services/camera/virtualcamera/VirtualCameraService.cc
+++ b/services/camera/virtualcamera/VirtualCameraService.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -41,6 +41,8 @@
 namespace virtualcamera {
 
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
 using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 
@@ -48,6 +50,7 @@
 
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
+constexpr int kMaxFps = 60;
 constexpr char kEnableTestCameraCmd[] = "enable_test_camera";
 constexpr char kDisableTestCameraCmd[] = "disable_test_camera";
 constexpr char kShellCmdHelp[] = R"(
@@ -69,13 +72,29 @@
   for (const SupportedStreamConfiguration& config :
        configuration.supportedStreamConfigs) {
     if (!isFormatSupportedForInput(config.width, config.height,
-                                   config.pixelFormat)) {
+                                   config.pixelFormat, config.maxFps)) {
       ALOGE("%s: Requested unsupported input format: %d x %d (%d)", __func__,
             config.width, config.height, static_cast<int>(config.pixelFormat));
       return ndk::ScopedAStatus::fromServiceSpecificError(
           Status::EX_ILLEGAL_ARGUMENT);
     }
   }
+
+  if (configuration.sensorOrientation != SensorOrientation::ORIENTATION_0 &&
+      configuration.sensorOrientation != SensorOrientation::ORIENTATION_90 &&
+      configuration.sensorOrientation != SensorOrientation::ORIENTATION_180 &&
+      configuration.sensorOrientation != SensorOrientation::ORIENTATION_270) {
+    return ndk::ScopedAStatus::fromServiceSpecificError(
+        Status::EX_ILLEGAL_ARGUMENT);
+  }
+
+  if (configuration.lensFacing != LensFacing::FRONT &&
+      configuration.lensFacing != LensFacing::BACK &&
+      configuration.lensFacing != LensFacing::EXTERNAL) {
+    return ndk::ScopedAStatus::fromServiceSpecificError(
+        Status::EX_ILLEGAL_ARGUMENT);
+  }
+
   return ndk::ScopedAStatus::ok();
 }
 
@@ -121,10 +140,8 @@
     return ndk::ScopedAStatus::ok();
   }
 
-  // TODO(b/301023410) Validate configuration and pass it to the camera.
   std::shared_ptr<VirtualCameraDevice> camera =
-      mVirtualCameraProvider->createCamera(configuration.supportedStreamConfigs,
-                                           configuration.virtualCameraCallback);
+      mVirtualCameraProvider->createCamera(configuration);
   if (camera == nullptr) {
     ALOGE("Failed to create camera for binder token 0x%" PRIxPTR,
           reinterpret_cast<uintptr_t>(token.get()));
@@ -241,8 +258,11 @@
 
   bool ret;
   VirtualCameraConfiguration configuration;
-  configuration.supportedStreamConfigs.push_back(
-      {.width = kVgaWidth, .height = kVgaHeight, Format::YUV_420_888});
+  configuration.supportedStreamConfigs.push_back({.width = kVgaWidth,
+                                                  .height = kVgaHeight,
+                                                  .pixelFormat = Format::YUV_420_888,
+                                                  .maxFps = kMaxFps});
+  configuration.lensFacing = LensFacing::EXTERNAL;
   registerCamera(mTestCameraToken, configuration, &ret);
   if (ret) {
     dprintf(out, "Successfully registered test camera %s",
diff --git a/services/camera/virtualcamera/VirtualCameraSession.cc b/services/camera/virtualcamera/VirtualCameraSession.cc
index 47780d8..d1ec763 100644
--- a/services/camera/virtualcamera/VirtualCameraSession.cc
+++ b/services/camera/virtualcamera/VirtualCameraSession.cc
@@ -18,6 +18,7 @@
 #define LOG_TAG "VirtualCameraSession"
 #include "VirtualCameraSession.h"
 
+#include <algorithm>
 #include <atomic>
 #include <chrono>
 #include <cstddef>
@@ -26,6 +27,7 @@
 #include <map>
 #include <memory>
 #include <mutex>
+#include <numeric>
 #include <optional>
 #include <tuple>
 #include <unordered_set>
@@ -43,6 +45,7 @@
 #include "aidl/android/hardware/camera/device/CaptureRequest.h"
 #include "aidl/android/hardware/camera/device/HalStream.h"
 #include "aidl/android/hardware/camera/device/NotifyMsg.h"
+#include "aidl/android/hardware/camera/device/RequestTemplate.h"
 #include "aidl/android/hardware/camera/device/ShutterMsg.h"
 #include "aidl/android/hardware/camera/device/StreamBuffer.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
@@ -68,6 +71,7 @@
 
 using ::aidl::android::companion::virtualcamera::Format;
 using ::aidl::android::companion::virtualcamera::IVirtualCameraCallback;
+using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::BufferCache;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
@@ -101,31 +105,61 @@
 // Maximum number of buffers to use per single stream.
 static constexpr size_t kMaxStreamBuffers = 2;
 
-CameraMetadata createDefaultRequestSettings(RequestTemplate type) {
-  hardware::camera::common::V1_0::helper::CameraMetadata metadataHelper;
-
-  camera_metadata_enum_android_control_capture_intent_t intent =
-      ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+camera_metadata_enum_android_control_capture_intent_t requestTemplateToIntent(
+    const RequestTemplate type) {
   switch (type) {
     case RequestTemplate::PREVIEW:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
     case RequestTemplate::STILL_CAPTURE:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
     case RequestTemplate::VIDEO_RECORD:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
     case RequestTemplate::VIDEO_SNAPSHOT:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
     default:
-      // Leave default.
-      break;
+      // Return PREVIEW by default
+      return ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
   }
+}
 
-  auto metadata = MetadataBuilder().setControlCaptureIntent(intent).build();
-  return (metadata != nullptr) ? std::move(*metadata) : CameraMetadata();
+int getMaxFps(const std::vector<SupportedStreamConfiguration>& configs) {
+  return std::transform_reduce(
+      configs.begin(), configs.end(), 0,
+      [](const int a, const int b) { return std::max(a, b); },
+      [](const SupportedStreamConfiguration& config) { return config.maxFps; });
+}
+
+CameraMetadata createDefaultRequestSettings(
+    const RequestTemplate type,
+    const std::vector<SupportedStreamConfiguration>& inputConfigs) {
+  int maxFps = getMaxFps(inputConfigs);
+  auto metadata =
+      MetadataBuilder()
+          .setControlCaptureIntent(requestTemplateToIntent(type))
+          .setControlMode(ANDROID_CONTROL_MODE_AUTO)
+          .setControlAeMode(ANDROID_CONTROL_AE_MODE_ON)
+          .setControlAeExposureCompensation(0)
+          .setControlAeTargetFpsRange(maxFps, maxFps)
+          .setControlAeAntibandingMode(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO)
+          .setControlAePrecaptureTrigger(
+              ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE)
+          .setControlAfTrigger(ANDROID_CONTROL_AF_TRIGGER_IDLE)
+          .setControlAfMode(ANDROID_CONTROL_AF_MODE_OFF)
+          .setControlAwbMode(ANDROID_CONTROL_AWB_MODE_AUTO)
+          .setControlEffectMode(ANDROID_CONTROL_EFFECT_MODE_OFF)
+          .setFaceDetectMode(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF)
+          .setFlashMode(ANDROID_FLASH_MODE_OFF)
+          .setFlashState(ANDROID_FLASH_STATE_UNAVAILABLE)
+          .build();
+  if (metadata == nullptr) {
+    ALOGE("%s: Failed to construct metadata for default request type %s",
+          __func__, toString(type).c_str());
+    return CameraMetadata();
+  } else {
+    ALOGV("%s: Successfully created metadata for request type %s", __func__,
+          toString(type).c_str());
+  }
+  return *metadata;
 }
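A hedged trace of getMaxFps() and how createDefaultRequestSettings() uses it (the input configurations are hypothetical):

// inputConfigs = { 640x480 @ maxFps 30, 1280x720 @ maxFps 60 }
//   getMaxFps(inputConfigs) == 60 (transform each config to maxFps, reduce with std::max, initial value 0)
//   the default request then carries ANDROID_CONTROL_AE_TARGET_FPS_RANGE = [60, 60]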
 
 HalStream getHalStream(const Stream& stream) {
@@ -150,6 +184,13 @@
   return halStream;
 }
 
+Stream getHighestResolutionStream(const std::vector<Stream>& streams) {
+  return *(std::max_element(streams.begin(), streams.end(),
+                            [](const Stream& a, const Stream& b) {
+                              return a.width * a.height < b.width * b.height;
+                            }));
+}
+
 }  // namespace
 
 VirtualCameraSession::VirtualCameraSession(
@@ -233,13 +274,15 @@
       }
     }
 
-    inputWidth = streams[0].width;
-    inputHeight = streams[0].height;
+    Stream maxResStream = getHighestResolutionStream(streams);
+    inputWidth = maxResStream.width;
+    inputHeight = maxResStream.height;
     if (mRenderThread == nullptr) {
       // If there's no client callback, start camera in test mode.
       const bool testMode = mVirtualCameraClientCallback == nullptr;
       mRenderThread = std::make_unique<VirtualCameraRenderThread>(
-          mSessionContext, inputWidth, inputHeight, mCameraDeviceCallback,
+          mSessionContext, Resolution(inputWidth, inputHeight),
+          virtualCamera->getMaxInputResolution(), mCameraDeviceCallback,
           testMode);
       mRenderThread->start();
       inputSurface = mRenderThread->getInputSurface();
@@ -263,12 +306,22 @@
     RequestTemplate in_type, CameraMetadata* _aidl_return) {
   ALOGV("%s: type %d", __func__, static_cast<int32_t>(in_type));
 
+  std::shared_ptr<VirtualCameraDevice> camera = mCameraDevice.lock();
+  if (camera == nullptr) {
+    ALOGW(
+        "%s: constructDefaultRequestSettings called on already unregistered "
+        "camera",
+        __func__);
+    return cameraStatus(Status::CAMERA_DISCONNECTED);
+  }
+
   switch (in_type) {
     case RequestTemplate::PREVIEW:
     case RequestTemplate::STILL_CAPTURE:
     case RequestTemplate::VIDEO_RECORD:
     case RequestTemplate::VIDEO_SNAPSHOT: {
-      *_aidl_return = createDefaultRequestSettings(in_type);
+      *_aidl_return =
+          createDefaultRequestSettings(in_type, camera->getInputConfigs());
       return ndk::ScopedAStatus::ok();
     }
     case RequestTemplate::MANUAL:
diff --git a/services/camera/virtualcamera/aidl/Android.bp b/services/camera/virtualcamera/aidl/Android.bp
index fdeb7f2..a02d390 100644
--- a/services/camera/virtualcamera/aidl/Android.bp
+++ b/services/camera/virtualcamera/aidl/Android.bp
@@ -8,9 +8,11 @@
     unstable: true,
     srcs: [
         "android/companion/virtualcamera/Format.aidl",
+        "android/companion/virtualcamera/LensFacing.aidl",
         "android/companion/virtualcamera/IVirtualCameraCallback.aidl",
         "android/companion/virtualcamera/IVirtualCameraService.aidl",
         "android/companion/virtualcamera/VirtualCameraConfiguration.aidl",
+        "android/companion/virtualcamera/SensorOrientation.aidl",
         "android/companion/virtualcamera/SupportedStreamConfiguration.aidl",
     ],
     local_include_dir: ".",
@@ -34,6 +36,6 @@
         java: {
             enabled: true,
             platform_apis: true,
-        }
+        },
     },
 }
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl
index cbe03e9..f5a84f7 100644
--- a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -36,7 +36,8 @@
      * @param height - height of the surface.
      * @param pixelFormat - pixel format of the surface.
      */
-    void onStreamConfigured(int streamId, in Surface surface, int width, int height, in Format pixelFormat);
+    void onStreamConfigured(int streamId, in Surface surface, int width, int height,
+            in Format pixelFormat);
 
     /**
      * Called when framework requests capture. This can be used by the client as a hint
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/LensFacing.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/LensFacing.aidl
new file mode 100644
index 0000000..8568c91
--- /dev/null
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/LensFacing.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.companion.virtualcamera;
+
+/**
+ * Direction that the virtual camera faces relative to the device's screen.
+ *
+ * @hide
+ */
+@Backing(type="int")
+enum LensFacing {
+    FRONT = 0,
+    BACK = 1,
+    EXTERNAL = 2,
+}
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SensorOrientation.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SensorOrientation.aidl
new file mode 100644
index 0000000..ef91f00
--- /dev/null
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SensorOrientation.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.companion.virtualcamera;
+
+/**
+ * Sensor orientation of a virtual camera.
+ *
+ * @hide
+ */
+@Backing(type="int")
+enum SensorOrientation {
+    ORIENTATION_0 = 0,
+    ORIENTATION_90 = 90,
+    ORIENTATION_180 = 180,
+    ORIENTATION_270 = 270,
+}
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl
index 7070cbd..6f86cbe 100644
--- a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.companion.virtualcamera;
 
 import android.companion.virtualcamera.Format;
@@ -26,4 +27,5 @@
     int width;
     int height;
     Format pixelFormat = Format.UNKNOWN;
+    int maxFps;
 }
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl
index c1a2f22..887ad26 100644
--- a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,9 +13,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.companion.virtualcamera;
 
 import android.companion.virtualcamera.IVirtualCameraCallback;
+import android.companion.virtualcamera.LensFacing;
+import android.companion.virtualcamera.SensorOrientation;
 import android.companion.virtualcamera.SupportedStreamConfiguration;
 
 /**
@@ -26,4 +29,6 @@
 parcelable VirtualCameraConfiguration {
     SupportedStreamConfiguration[] supportedStreamConfigs;
     IVirtualCameraCallback virtualCameraCallback;
+    SensorOrientation sensorOrientation = SensorOrientation.ORIENTATION_0;
+    LensFacing lensFacing;
 }
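A hedged NDK-side sketch of populating the extended parcelable (field names follow the declarations above; the call site and literal values are hypothetical):

VirtualCameraConfiguration configuration;
configuration.supportedStreamConfigs.push_back({.width = 640,
                                                .height = 480,
                                                .pixelFormat = Format::YUV_420_888,
                                                .maxFps = 30});
configuration.sensorOrientation = SensorOrientation::ORIENTATION_0;
configuration.lensFacing = LensFacing::FRONT;
// Leaving virtualCameraCallback unset keeps the session in test mode, mirroring
// the enable_test_camera path in VirtualCameraService.cc.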
diff --git a/services/camera/virtualcamera/flags/Android.bp b/services/camera/virtualcamera/flags/Android.bp
new file mode 100644
index 0000000..3ff67ea
--- /dev/null
+++ b/services/camera/virtualcamera/flags/Android.bp
@@ -0,0 +1,33 @@
+soong_config_module_type {
+    name: "virtual_device_build_flags_cc_defaults",
+    module_type: "cc_defaults",
+    config_namespace: "vdm",
+    bool_variables: [
+        "virtual_camera_service_enabled",
+    ],
+    properties: [
+        "cflags",
+    ],
+}
+
+soong_config_bool_variable {
+    name: "virtual_camera_service_enabled",
+}
+
+virtual_device_build_flags_cc_defaults {
+    name: "virtual_device_build_flags_defaults",
+    soong_config_variables: {
+        virtual_camera_service_enabled: {
+            cflags: ["-DVIRTUAL_CAMERA_SERVICE_ENABLED=1"],
+        },
+    },
+}
+
+cc_library_static {
+    name: "libvirtualdevicebuildflags",
+    srcs: [
+        "android_companion_virtualdevice_build_flags.cc",
+    ],
+    export_include_dirs: ["."],
+    defaults: ["virtual_device_build_flags_defaults"],
+}
diff --git a/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.cc b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.cc
new file mode 100644
index 0000000..5525bc9
--- /dev/null
+++ b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+namespace companion {
+namespace virtualdevice {
+namespace flags {
+
+bool virtual_camera_service_build_flag() {
+#if VIRTUAL_CAMERA_SERVICE_ENABLED
+  return true;
+#else
+  return false;
+#endif
+}
+
+}  // namespace flags
+}  // namespace virtualdevice
+}  // namespace companion
+}  // namespace android
diff --git a/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.h b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.h
new file mode 100644
index 0000000..718ce9b
--- /dev/null
+++ b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+namespace companion {
+namespace virtualdevice {
+namespace flags {
+
+// Returns true if the virtual camera service is enabled
+// in the build.
+//
+// TODO(b/309090563) - Deprecate in favor of an auto-generated library to query
+// build flags once available.
+bool virtual_camera_service_build_flag();
+
+}  // namespace flags
+}  // namespace virtualdevice
+}  // namespace companion
+}  // namespace android
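A hedged sketch of how a caller might gate behavior on the build flag (the function below is hypothetical, not part of this change):

#include "android_companion_virtualdevice_build_flags.h"

void maybeStartVirtualCameraProvider() {
  // Reflects the compiled-in value of the vdm.virtual_camera_service_enabled
  // soong config variable via -DVIRTUAL_CAMERA_SERVICE_ENABLED.
  if (android::companion::virtualdevice::flags::virtual_camera_service_build_flag()) {
    // ... register the virtual camera provider ...
  }
}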
diff --git a/services/camera/virtualcamera/fuzzer/Android.bp b/services/camera/virtualcamera/fuzzer/Android.bp
index 71e8f50..afa1e44 100644
--- a/services/camera/virtualcamera/fuzzer/Android.bp
+++ b/services/camera/virtualcamera/fuzzer/Android.bp
@@ -15,7 +15,7 @@
  * limitations under the License.
  *
  *****************************************************************************/
- package {
+package {
     // See: http://go/android-license-faq
     default_applicable_licenses: ["Android-Apache-2.0"],
 }
diff --git a/services/camera/virtualcamera/tests/Android.bp b/services/camera/virtualcamera/tests/Android.bp
index bc46ba0..13104c1 100644
--- a/services/camera/virtualcamera/tests/Android.bp
+++ b/services/camera/virtualcamera/tests/Android.bp
@@ -14,11 +14,13 @@
         "libgtest",
         "libgmock",
     ],
-    srcs: ["EglUtilTest.cc",
-           "VirtualCameraDeviceTest.cc",
-           "VirtualCameraProviderTest.cc",
-           "VirtualCameraRenderThreadTest.cc",
-           "VirtualCameraServiceTest.cc",
-           "VirtualCameraSessionTest.cc"],
+    srcs: [
+        "EglUtilTest.cc",
+        "VirtualCameraDeviceTest.cc",
+        "VirtualCameraProviderTest.cc",
+        "VirtualCameraRenderThreadTest.cc",
+        "VirtualCameraServiceTest.cc",
+        "VirtualCameraSessionTest.cc",
+    ],
     test_suites: ["device-tests"],
 }
diff --git a/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc b/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
index 140ae65..35bf752 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,13 +14,17 @@
  * limitations under the License.
  */
 
+#include <algorithm>
+#include <iterator>
 #include <memory>
 
 #include "VirtualCameraDevice.h"
 #include "aidl/android/companion/virtualcamera/Format.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
+#include "aidl/android/hardware/graphics/common/PixelFormat.h"
 #include "android/binder_interface_utils.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -34,7 +38,10 @@
 namespace {
 
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
 using ::aidl::android::hardware::camera::device::Stream;
 using ::aidl::android::hardware::camera::device::StreamConfiguration;
@@ -49,6 +56,21 @@
 constexpr int kVgaHeight = 480;
 constexpr int kHdWidth = 1280;
 constexpr int kHdHeight = 720;
+constexpr int kMaxFps = 30;
+
+const Stream kVgaYUV420Stream = Stream{
+    .streamType = StreamType::OUTPUT,
+    .width = kVgaWidth,
+    .height = kVgaHeight,
+    .format = PixelFormat::YCBCR_420_888,
+};
+
+const Stream kVgaJpegStream = Stream{
+    .streamType = StreamType::OUTPUT,
+    .width = kVgaWidth,
+    .height = kVgaHeight,
+    .format = PixelFormat::BLOB,
+};
 
 struct AvailableStreamConfiguration {
   const int width;
@@ -96,18 +118,19 @@
 }
 
 struct VirtualCameraConfigTestParam {
-  std::vector<SupportedStreamConfiguration> inputConfig;
+  VirtualCameraConfiguration inputConfig;
   std::vector<AvailableStreamConfiguration> expectedAvailableStreamConfigs;
 };
 
-class VirtualCameraDeviceTest
+class VirtualCameraDeviceCharacteristicsTest
     : public testing::TestWithParam<VirtualCameraConfigTestParam> {};
 
-TEST_P(VirtualCameraDeviceTest, cameraCharacteristicsForInputFormat) {
+TEST_P(VirtualCameraDeviceCharacteristicsTest,
+       cameraCharacteristicsForInputFormat) {
   const VirtualCameraConfigTestParam& param = GetParam();
   std::shared_ptr<VirtualCameraDevice> camera =
-      ndk::SharedRefBase::make<VirtualCameraDevice>(
-          kCameraId, param.inputConfig, /*virtualCameraClientCallback=*/nullptr);
+      ndk::SharedRefBase::make<VirtualCameraDevice>(kCameraId,
+                                                    param.inputConfig);
 
   CameraMetadata metadata;
   ASSERT_TRUE(camera->getCameraCharacteristics(&metadata).isOk());
@@ -132,13 +155,19 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(
-    cameraCharacteristicsForInputFormat, VirtualCameraDeviceTest,
+    cameraCharacteristicsForInputFormat, VirtualCameraDeviceCharacteristicsTest,
     testing::Values(
         VirtualCameraConfigTestParam{
-            .inputConfig = {SupportedStreamConfiguration{
-                .width = kVgaWidth,
-                .height = kVgaHeight,
-                .pixelFormat = Format::YUV_420_888}},
+            .inputConfig =
+                VirtualCameraConfiguration{
+                    .supportedStreamConfigs = {SupportedStreamConfiguration{
+                        .width = kVgaWidth,
+                        .height = kVgaHeight,
+                        .pixelFormat = Format::YUV_420_888,
+                        .maxFps = kMaxFps}},
+                    .virtualCameraCallback = nullptr,
+                    .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                    .lensFacing = LensFacing::FRONT},
             .expectedAvailableStreamConfigs =
                 {AvailableStreamConfiguration{
                      .width = kVgaWidth,
@@ -160,14 +189,22 @@
                      .streamConfiguration =
                          ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}}},
         VirtualCameraConfigTestParam{
-            .inputConfig = {SupportedStreamConfiguration{
-                                .width = kVgaWidth,
-                                .height = kVgaHeight,
-                                .pixelFormat = Format::YUV_420_888},
-                            SupportedStreamConfiguration{
-                                .width = kHdWidth,
-                                .height = kHdHeight,
-                                .pixelFormat = Format::YUV_420_888}},
+            .inputConfig =
+                VirtualCameraConfiguration{
+                    .supportedStreamConfigs =
+                        {SupportedStreamConfiguration{
+                             .width = kVgaWidth,
+                             .height = kVgaHeight,
+                             .pixelFormat = Format::YUV_420_888,
+                             .maxFps = kMaxFps},
+                         SupportedStreamConfiguration{
+                             .width = kHdWidth,
+                             .height = kHdHeight,
+                             .pixelFormat = Format::YUV_420_888,
+                             .maxFps = kMaxFps}},
+                    .virtualCameraCallback = nullptr,
+                    .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                    .lensFacing = LensFacing::BACK},
             .expectedAvailableStreamConfigs = {
                 AvailableStreamConfiguration{
                     .width = kVgaWidth,
@@ -208,6 +245,67 @@
                     .streamConfiguration =
                         ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}}}));
 
+class VirtualCameraDeviceTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    mCamera = ndk::SharedRefBase::make<VirtualCameraDevice>(
+        kCameraId, VirtualCameraConfiguration{
+                       .supportedStreamConfigs = {SupportedStreamConfiguration{
+                           .width = kVgaWidth,
+                           .height = kVgaHeight,
+                           .pixelFormat = Format::YUV_420_888,
+                           .maxFps = kMaxFps}},
+                       .virtualCameraCallback = nullptr,
+                       .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                       .lensFacing = LensFacing::FRONT});
+  }
+
+ protected:
+  std::shared_ptr<VirtualCameraDevice> mCamera;
+};
+
+TEST_F(VirtualCameraDeviceTest, configureMaximalNumberOfNonStallStreamsSucceeds) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfProcessedStreams,
+              kVgaYUV420Stream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_TRUE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, configureTooManyNonStallStreamsFails) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfProcessedStreams + 1,
+              kVgaYUV420Stream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_FALSE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, configureMaximalNumberOfStallStreamsSucceeds) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfStallStreams, kVgaJpegStream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_TRUE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, configureTooManyStallStreamsFails) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfStallStreams + 1, kVgaJpegStream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_FALSE(aidl_ret);
+}
+
 }  // namespace
 }  // namespace virtualcamera
 }  // namespace companion
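Note: the new stream-count tests build a StreamConfiguration by appending N identical streams with std::fill_n and a back-insert iterator. A self-contained sketch of that pattern, using a plain struct and a hypothetical limit in place of Stream and VirtualCameraDevice::kMaxNumberOfProcessedStreams:

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>

struct FakeStream {
  int width;
  int height;
};

int main() {
  // Hypothetical limit standing in for kMaxNumberOfProcessedStreams.
  constexpr std::size_t kLimit = 6;
  std::vector<FakeStream> streams;
  // Append kLimit copies of the same VGA stream template.
  std::fill_n(std::back_insert_iterator(streams), kLimit, FakeStream{640, 480});
  return streams.size() == kLimit ? 0 : 1;
}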
diff --git a/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc b/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc
index 615a77c..ab647a4 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -33,7 +33,10 @@
 namespace {
 
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraDeviceStatus;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::common::TorchModeStatus;
@@ -49,6 +52,7 @@
 
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
+constexpr int kMaxFps = 30;
 constexpr char kVirtualCameraNameRegex[] =
     "device@[0-9]+\\.[0-9]+/virtual/[0-9]+";
 
@@ -79,10 +83,15 @@
   std::shared_ptr<VirtualCameraProvider> mCameraProvider;
   std::shared_ptr<MockCameraProviderCallback> mMockCameraProviderCallback =
       ndk::SharedRefBase::make<MockCameraProviderCallback>();
-  std::vector<SupportedStreamConfiguration> mInputConfigs = {
-      SupportedStreamConfiguration{.width = kVgaWidth,
-                                   .height = kVgaHeight,
-                                   .pixelFormat = Format::YUV_420_888}};
+  VirtualCameraConfiguration mInputConfig = VirtualCameraConfiguration{
+      .supportedStreamConfigs = {SupportedStreamConfiguration{
+          .width = kVgaWidth,
+          .height = kVgaHeight,
+          .pixelFormat = Format::YUV_420_888,
+          .maxFps = kMaxFps}},
+      .virtualCameraCallback = nullptr,
+      .sensorOrientation = SensorOrientation::ORIENTATION_0,
+      .lensFacing = LensFacing::FRONT};
 };
 
 TEST_F(VirtualCameraProviderTest, SetNullCameraCallbackFails) {
@@ -109,7 +118,7 @@
 
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
   EXPECT_THAT(camera, Not(IsNull()));
   EXPECT_THAT(camera->getCameraName(), MatchesRegex(kVirtualCameraNameRegex));
 
@@ -127,7 +136,7 @@
       .WillOnce(Return(ndk::ScopedAStatus::ok()));
 
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
 
   // Created camera should be in the list of cameras.
@@ -139,7 +148,7 @@
 TEST_F(VirtualCameraProviderTest, RemoveCamera) {
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
 
   EXPECT_CALL(*mMockCameraProviderCallback,
               cameraDeviceStatusChange(Eq(camera->getCameraName()),
@@ -156,7 +165,7 @@
 TEST_F(VirtualCameraProviderTest, RemoveNonExistingCamera) {
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
 
   // Removing non-existing camera should fail.
   const std::string cameraName = "DefinitelyNoTCamera";
diff --git a/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc b/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc
index 5f899b8..ddcb789 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc
@@ -33,6 +33,7 @@
 #include "android/binder_auto_utils.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "util/Util.h"
 
 namespace android {
 namespace companion {
@@ -62,6 +63,7 @@
 
 constexpr int kInputWidth = 640;
 constexpr int kInputHeight = 480;
+const Resolution kInputResolution(kInputWidth, kInputHeight);
 
 Matcher<StreamBuffer> IsStreamBufferWithStatus(const int streamId,
                                                const int bufferId,
@@ -102,7 +104,8 @@
     mMockCameraDeviceCallback =
         ndk::SharedRefBase::make<MockCameraDeviceCallback>();
     mRenderThread = std::make_unique<VirtualCameraRenderThread>(
-        *mSessionContext, kInputWidth, kInputHeight, mMockCameraDeviceCallback);
+        *mSessionContext, kInputResolution,
+        /*reportedSensorSize*/ kInputResolution, mMockCameraDeviceCallback);
   }
 
  protected:
diff --git a/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc b/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc
index 38261fb..d4d00a2 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -39,6 +39,8 @@
 
 using ::aidl::android::companion::virtualcamera::BnVirtualCameraCallback;
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraDeviceStatus;
 using ::aidl::android::hardware::camera::common::TorchModeStatus;
@@ -56,16 +58,25 @@
 
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
+constexpr int kMaxFps = 30;
+constexpr SensorOrientation kSensorOrientation =
+    SensorOrientation::ORIENTATION_0;
+constexpr LensFacing kLensFacing = LensFacing::FRONT;
 constexpr char kCreateVirtualDevicePermissions[] =
     "android.permission.CREATE_VIRTUAL_DEVICE";
 
 const VirtualCameraConfiguration kEmptyVirtualCameraConfiguration;
 
 VirtualCameraConfiguration createConfiguration(const int width, const int height,
-                                               const Format format) {
+                                               const Format format,
+                                               const int maxFps) {
   VirtualCameraConfiguration configuration;
-  configuration.supportedStreamConfigs.push_back(
-      {.width = width, .height = height, .pixelFormat = format});
+  configuration.supportedStreamConfigs.push_back({.width = width,
+                                                  .height = height,
+                                                  .pixelFormat = format,
+                                                  .maxFps = maxFps});
+  configuration.sensorOrientation = kSensorOrientation;
+  configuration.lensFacing = kLensFacing;
   return configuration;
 }
 
@@ -150,7 +161,7 @@
   int mDevNullFd;
 
   VirtualCameraConfiguration mVgaYUV420OnlyConfiguration =
-      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888);
+      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888, kMaxFps);
 };
 
 TEST_F(VirtualCameraServiceTest, RegisterCameraWithYuvInputSucceeds) {
@@ -173,7 +184,7 @@
   bool aidlRet;
 
   VirtualCameraConfiguration config =
-      createConfiguration(kVgaWidth, kVgaHeight, Format::RGBA_8888);
+      createConfiguration(kVgaWidth, kVgaHeight, Format::RGBA_8888, kMaxFps);
 
   ASSERT_TRUE(mCameraService->registerCamera(ndkToken, config, &aidlRet).isOk());
 
@@ -208,7 +219,7 @@
   bool aidlRet;
 
   VirtualCameraConfiguration config =
-      createConfiguration(kVgaWidth, kVgaHeight, Format::UNKNOWN);
+      createConfiguration(kVgaWidth, kVgaHeight, Format::UNKNOWN, kMaxFps);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
@@ -219,7 +230,7 @@
 TEST_F(VirtualCameraServiceTest, ConfigurationWithTooHighResFails) {
   bool aidlRet;
   VirtualCameraConfiguration config =
-      createConfiguration(1000000, 1000000, Format::YUV_420_888);
+      createConfiguration(1000000, 1000000, Format::YUV_420_888, kMaxFps);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
@@ -230,7 +241,7 @@
 TEST_F(VirtualCameraServiceTest, ConfigurationWithUnalignedResolutionFails) {
   bool aidlRet;
   VirtualCameraConfiguration config =
-      createConfiguration(641, 481, Format::YUV_420_888);
+      createConfiguration(641, 481, Format::YUV_420_888, kMaxFps);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
@@ -241,7 +252,29 @@
 TEST_F(VirtualCameraServiceTest, ConfigurationWithNegativeResolutionFails) {
   bool aidlRet;
   VirtualCameraConfiguration config =
-      createConfiguration(-1, kVgaHeight, Format::YUV_420_888);
+      createConfiguration(-1, kVgaHeight, Format::YUV_420_888, kMaxFps);
+
+  ASSERT_FALSE(
+      mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
+  EXPECT_FALSE(aidlRet);
+  EXPECT_THAT(getCameraIds(), IsEmpty());
+}
+
+TEST_F(VirtualCameraServiceTest, ConfigurationWithTooLowMaxFpsFails) {
+  bool aidlRet;
+  VirtualCameraConfiguration config =
+      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888, 0);
+
+  ASSERT_FALSE(
+      mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
+  EXPECT_FALSE(aidlRet);
+  EXPECT_THAT(getCameraIds(), IsEmpty());
+}
+
+TEST_F(VirtualCameraServiceTest, ConfigurationWithTooHighMaxFpsFails) {
+  bool aidlRet;
+  VirtualCameraConfiguration config =
+      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888, 90);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
diff --git a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
index 30bd2b6..446c679 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
 #include "VirtualCameraSession.h"
 #include "aidl/android/companion/virtualcamera/BnVirtualCameraCallback.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/BnCameraDeviceCallback.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
@@ -36,14 +37,21 @@
 namespace virtualcamera {
 namespace {
 
-constexpr int kWidth = 640;
-constexpr int kHeight = 480;
+constexpr int kVgaWidth = 640;
+constexpr int kVgaHeight = 480;
+constexpr int kSvgaWidth = 800;
+constexpr int kSvgaHeight = 600;
+constexpr int kMaxFps = 30;
 constexpr int kStreamId = 0;
+constexpr int kSecondStreamId = 1;
 constexpr int kCameraId = 42;
 
 using ::aidl::android::companion::virtualcamera::BnVirtualCameraCallback;
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::BnCameraDeviceCallback;
 using ::aidl::android::hardware::camera::device::BufferRequest;
@@ -105,11 +113,20 @@
         ndk::SharedRefBase::make<MockVirtualCameraCallback>();
     mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
         kCameraId,
-        std::vector<SupportedStreamConfiguration>{
-            SupportedStreamConfiguration{.width = kWidth,
-                                         .height = kHeight,
-                                         .pixelFormat = Format::YUV_420_888}},
-        mMockVirtualCameraClientCallback);
+        VirtualCameraConfiguration{
+            .supportedStreamConfigs = {SupportedStreamConfiguration{
+                                           .width = kVgaWidth,
+                                           .height = kVgaHeight,
+                                           .pixelFormat = Format::YUV_420_888,
+                                           .maxFps = kMaxFps},
+                                       SupportedStreamConfiguration{
+                                           .width = kSvgaWidth,
+                                           .height = kSvgaHeight,
+                                           .pixelFormat = Format::YUV_420_888,
+                                           .maxFps = kMaxFps}},
+            .virtualCameraCallback = nullptr,
+            .sensorOrientation = SensorOrientation::ORIENTATION_0,
+            .lensFacing = LensFacing::FRONT});
     mVirtualCameraSession = ndk::SharedRefBase::make<VirtualCameraSession>(
         mVirtualCameraDevice, mMockCameraDeviceCallback,
         mMockVirtualCameraClientCallback);
@@ -146,18 +163,22 @@
   PixelFormat format = PixelFormat::YCBCR_420_888;
   StreamConfiguration streamConfiguration;
   streamConfiguration.streams = {
-      createStream(kStreamId, kWidth, kHeight, format)};
+      createStream(kStreamId, kVgaWidth, kVgaHeight, format),
+      createStream(kSecondStreamId, kSvgaWidth, kSvgaHeight, format)};
   std::vector<HalStream> halStreams;
-  EXPECT_CALL(
-      *mMockVirtualCameraClientCallback,
-      onStreamConfigured(kStreamId, _, kWidth, kHeight, Format::YUV_420_888));
+
+  // Expect highest resolution to be picked for the client input.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId, _, kSvgaWidth, kSvgaHeight,
+                                 Format::YUV_420_888));
 
   ASSERT_TRUE(
       mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
           .isOk());
 
   EXPECT_THAT(halStreams, SizeIs(streamConfiguration.streams.size()));
-  EXPECT_THAT(mVirtualCameraSession->getStreamIds(), ElementsAre(0));
+  EXPECT_THAT(mVirtualCameraSession->getStreamIds(),
+              ElementsAre(kStreamId, kSecondStreamId));
 }
 
 TEST_F(VirtualCameraSessionTest, SecondConfigureDropsUnreferencedStreams) {
@@ -165,18 +186,18 @@
   StreamConfiguration streamConfiguration;
   std::vector<HalStream> halStreams;
 
-  streamConfiguration.streams = {createStream(0, kWidth, kHeight, format),
-                                 createStream(1, kWidth, kHeight, format),
-                                 createStream(2, kWidth, kHeight, format)};
+  streamConfiguration.streams = {createStream(0, kVgaWidth, kVgaHeight, format),
+                                 createStream(1, kVgaWidth, kVgaHeight, format),
+                                 createStream(2, kVgaWidth, kVgaHeight, format)};
   ASSERT_TRUE(
       mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
           .isOk());
 
   EXPECT_THAT(mVirtualCameraSession->getStreamIds(), ElementsAre(0, 1, 2));
 
-  streamConfiguration.streams = {createStream(0, kWidth, kHeight, format),
-                                 createStream(2, kWidth, kHeight, format),
-                                 createStream(3, kWidth, kHeight, format)};
+  streamConfiguration.streams = {createStream(0, kVgaWidth, kVgaHeight, format),
+                                 createStream(2, kVgaWidth, kVgaHeight, format),
+                                 createStream(3, kVgaWidth, kVgaHeight, format)};
   ASSERT_TRUE(
       mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
           .isOk());
@@ -201,8 +222,8 @@
 
 TEST_F(VirtualCameraSessionTest, onProcessCaptureRequestTriggersClientCallback) {
   StreamConfiguration streamConfiguration;
-  streamConfiguration.streams = {
-      createStream(kStreamId, kWidth, kHeight, PixelFormat::YCBCR_420_888)};
+  streamConfiguration.streams = {createStream(kStreamId, kVgaWidth, kVgaHeight,
+                                              PixelFormat::YCBCR_420_888)};
   std::vector<CaptureRequest> requests(1);
   requests[0].frameNumber = 42;
   requests[0].settings = *(
@@ -226,8 +247,8 @@
 
 TEST_F(VirtualCameraSessionTest, configureAfterCameraRelease) {
   StreamConfiguration streamConfiguration;
-  streamConfiguration.streams = {
-      createStream(kStreamId, kWidth, kHeight, PixelFormat::YCBCR_420_888)};
+  streamConfiguration.streams = {createStream(kStreamId, kVgaWidth, kVgaHeight,
+                                              PixelFormat::YCBCR_420_888)};
   std::vector<HalStream> halStreams;
 
   // Release virtual camera.
@@ -240,6 +261,17 @@
       Eq(static_cast<int32_t>(Status::CAMERA_DISCONNECTED)));
 }
 
+TEST_F(VirtualCameraSessionTest, ConfigureWithEmptyStreams) {
+  StreamConfiguration streamConfiguration;
+  std::vector<HalStream> halStreams;
+
+  // Expect the configuration attempt to return the ILLEGAL_ARGUMENT service specific code.
+  EXPECT_THAT(
+      mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .getServiceSpecificError(),
+      Eq(static_cast<int32_t>(Status::ILLEGAL_ARGUMENT)));
+}
+
 }  // namespace
 }  // namespace virtualcamera
 }  // namespace companion
diff --git a/services/camera/virtualcamera/util/EglProgram.cc b/services/camera/virtualcamera/util/EglProgram.cc
index 510fd33..7554a67 100644
--- a/services/camera/virtualcamera/util/EglProgram.cc
+++ b/services/camera/virtualcamera/util/EglProgram.cc
@@ -68,12 +68,13 @@
     })";
 
 constexpr char kExternalTextureVertexShader[] = R"(#version 300 es
+  uniform mat4 aTextureTransformMatrix; // Transform matrix given by surface texture.
   in vec4 aPosition;
   in vec2 aTextureCoord;
   out vec2 vTextureCoord;
   void main() {
     gl_Position = aPosition;
-    vTextureCoord = aTextureCoord;
+    vTextureCoord = (aTextureTransformMatrix * vec4(aTextureCoord, 0.0, 1.0)).xy;
   })";
 
 constexpr char kExternalYuvTextureFragmentShader[] = R"(#version 300 es
@@ -100,10 +101,12 @@
     })";
 
 constexpr int kCoordsPerVertex = 3;
-constexpr std::array<float, 12> kSquareCoords{-1.f, 1.0f, 0.0f,  // top left
-                                              -1.f, -1.f, 0.0f,  // bottom left
-                                              1.0f, -1.f, 0.0f,  // bottom right
-                                              1.0f, 1.0f, 0.0f};  // top right
+
+constexpr std::array<float, 12> kSquareCoords{
+    -1.f, -1.0f, 0.0f,   // top left
+    -1.f, 1.f,   0.0f,   // bottom left
+    1.0f, 1.f,   0.0f,   // bottom right
+    1.0f, -1.0f, 0.0f};  // top right
 
 constexpr std::array<float, 8> kTextureCoords{0.0f, 1.0f,   // top left
                                               0.0f, 0.0f,   // bottom left
@@ -265,32 +268,50 @@
   } else {
     ALOGE("External texture EGL shader program initialization failed.");
   }
+
+  // Lookup and cache handles to uniforms & attributes.
+  mPositionHandle = glGetAttribLocation(mProgram, "aPosition");
+  mTextureCoordHandle = glGetAttribLocation(mProgram, "aTextureCoord");
+  mTransformMatrixHandle =
+      glGetUniformLocation(mProgram, "aTextureTransformMatrix");
+  mTextureHandle = glGetUniformLocation(mProgram, "uTexture");
+
+  // Pass vertex array to the shader.
+  glEnableVertexAttribArray(mPositionHandle);
+  glVertexAttribPointer(mPositionHandle, kCoordsPerVertex, GL_FLOAT, false,
+                        kSquareCoords.size(), kSquareCoords.data());
+
+  // Pass texture coordinates corresponding to vertex array to the shader.
+  glEnableVertexAttribArray(mTextureCoordHandle);
+  glVertexAttribPointer(mTextureCoordHandle, 2, GL_FLOAT, false,
+                        kTextureCoords.size(), kTextureCoords.data());
 }
 
-bool EglTextureProgram::draw(GLuint textureId) {
+EglTextureProgram::~EglTextureProgram() {
+  if (mPositionHandle != -1) {
+    glDisableVertexAttribArray(mPositionHandle);
+  }
+  if (mTextureCoordHandle != -1) {
+    glDisableVertexAttribArray(mTextureCoordHandle);
+  }
+}
+
+bool EglTextureProgram::draw(GLuint textureId,
+                             const std::array<float, 16>& transformMatrix) {
   // Load compiled shader.
   glUseProgram(mProgram);
   if (checkEglError("glUseProgram")) {
     return false;
   }
 
-  // Pass vertex array to the shader.
-  int positionHandle = glGetAttribLocation(mProgram, "aPosition");
-  glEnableVertexAttribArray(positionHandle);
-  glVertexAttribPointer(positionHandle, kCoordsPerVertex, GL_FLOAT, false,
-                        kSquareCoords.size(), kSquareCoords.data());
-
-  // Pass texture coordinates corresponding to vertex array to the shader.
-  int textureCoordHandle = glGetAttribLocation(mProgram, "aTextureCoord");
-  glEnableVertexAttribArray(textureCoordHandle);
-  glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, false,
-                        kTextureCoords.size(), kTextureCoords.data());
+  // Pass transformation matrix for the texture coordinates.
+  glUniformMatrix4fv(mTransformMatrixHandle, 1, /*transpose=*/GL_FALSE,
+                     transformMatrix.data());
 
   // Configure texture for the shader.
-  int textureHandle = glGetUniformLocation(mProgram, "uTexture");
   glActiveTexture(GL_TEXTURE0);
   glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureId);
-  glUniform1i(textureHandle, 0);
+  glUniform1i(mTextureHandle, 0);
 
   // Draw triangle strip forming a square filling the viewport.
   glDrawElements(GL_TRIANGLES, kDrawOrder.size(), GL_UNSIGNED_BYTE,
diff --git a/services/camera/virtualcamera/util/EglProgram.h b/services/camera/virtualcamera/util/EglProgram.h
index 1b5f2cd..c695cbb 100644
--- a/services/camera/virtualcamera/util/EglProgram.h
+++ b/services/camera/virtualcamera/util/EglProgram.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_COMPANION_VIRTUALCAMERA_EGLPROGRAM_H
 #define ANDROID_COMPANION_VIRTUALCAMERA_EGLPROGRAM_H
 
+#include <array>
+
 #include "GLES/gl.h"
 
 namespace android {
@@ -58,8 +60,23 @@
   enum class TextureFormat { RGBA, YUV };
 
   EglTextureProgram(TextureFormat textureFormat = TextureFormat::YUV);
+  virtual ~EglTextureProgram();
 
-  bool draw(GLuint textureId);
+  // Draw texture over whole viewport, applying transformMatrix to texture
+  // coordinates.
+  //
+  // The transform matrix is a 4x4 matrix in column-major order. It is applied
+  // to the texture coordinate (s, t, 0, 1), with s and t in the [0, 1] range,
+  // prior to sampling:
+  //
+  // textureCoord = (transformMatrix * vec4(s, t, 0, 1)).xy
+  bool draw(GLuint textureId, const std::array<float, 16>& transformMatrix);
+
+ private:
+  int mPositionHandle = -1;
+  int mTextureCoordHandle = -1;
+  int mTransformMatrixHandle = -1;
+  int mTextureHandle = -1;
 };
 
 }  // namespace virtualcamera
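Note: the transform matrix documented above is column-major and multiplies the texture coordinate (s, t, 0, 1) before sampling. A self-contained sketch of that convention on the CPU; the identity and vertical-flip matrices below are illustrative values, not output of SurfaceTexture:

#include <array>
#include <cstdio>

// Apply a column-major 4x4 matrix to (s, t, 0, 1) and keep x/y, mirroring the
// vertex shader's (aTextureTransformMatrix * vec4(aTextureCoord, 0.0, 1.0)).xy.
std::array<float, 2> transformTexCoord(const std::array<float, 16>& m, float s,
                                       float t) {
  return {m[0] * s + m[4] * t + m[12], m[1] * s + m[5] * t + m[13]};
}

int main() {
  const std::array<float, 16> identity = {1, 0, 0, 0, 0, 1, 0, 0,
                                          0, 0, 1, 0, 0, 0, 0, 1};
  // Flips t: t' = 1 - t (column-major, so the translation sits in m[12..15]).
  const std::array<float, 16> flipVertically = {1, 0, 0, 0, 0, -1, 0, 0,
                                                0, 0, 1, 0, 0, 1,  0, 1};
  std::array<float, 2> a = transformTexCoord(identity, 0.25f, 0.75f);
  std::array<float, 2> b = transformTexCoord(flipVertically, 0.25f, 0.75f);
  std::printf("identity: (%.2f, %.2f), flipped: (%.2f, %.2f)\n",
              a[0], a[1], b[0], b[1]);  // (0.25, 0.75), (0.25, 0.25)
  return 0;
}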
diff --git a/services/camera/virtualcamera/util/EglSurfaceTexture.cc b/services/camera/virtualcamera/util/EglSurfaceTexture.cc
index 5b479c0..9f26e19 100644
--- a/services/camera/virtualcamera/util/EglSurfaceTexture.cc
+++ b/services/camera/virtualcamera/util/EglSurfaceTexture.cc
@@ -68,6 +68,16 @@
   return mTextureId;
 }
 
+GLuint EglSurfaceTexture::getTextureId() const {
+  return mTextureId;
+}
+
+std::array<float, 16> EglSurfaceTexture::getTransformMatrix() {
+  std::array<float, 16> matrix;
+  mGlConsumer->getTransformMatrix(matrix.data());
+  return matrix;
+}
+
 uint32_t EglSurfaceTexture::getWidth() const {
   return mWidth;
 }
diff --git a/services/camera/virtualcamera/util/EglSurfaceTexture.h b/services/camera/virtualcamera/util/EglSurfaceTexture.h
index 14dc7d6..faad7c4 100644
--- a/services/camera/virtualcamera/util/EglSurfaceTexture.h
+++ b/services/camera/virtualcamera/util/EglSurfaceTexture.h
@@ -57,6 +57,17 @@
   // Returns EGL texture id of the texture.
   GLuint updateTexture();
 
+  // Returns EGL texture id of the underlying texture.
+  GLuint getTextureId() const;
+
+  // Returns the 4x4 transformation matrix, in column-major order, that
+  // should be applied to EGL texture coordinates before sampling from the
+  // texture backed by the android native buffer, so that the corresponding
+  // region of the underlying buffer is sampled.
+  //
+  // See SurfaceTexture.getTransformMatrix for more details.
+  std::array<float, 16> getTransformMatrix();
+
  private:
   sp<IGraphicBufferProducer> mBufferProducer;
   sp<IGraphicBufferConsumer> mBufferConsumer;
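Note: getTransformMatrix() is meant to be paired with the new EglTextureProgram::draw(textureId, transformMatrix) overload from the change above. A sketch of how a render pass might wire them together, assuming a current EGL context and already constructed objects; error handling is omitted:

#include "util/EglProgram.h"
#include "util/EglSurfaceTexture.h"

using android::companion::virtualcamera::EglSurfaceTexture;
using android::companion::virtualcamera::EglTextureProgram;

void renderLatestFrame(EglSurfaceTexture& surfaceTexture,
                       EglTextureProgram& program) {
  // Latch the most recently queued buffer into the GL texture.
  surfaceTexture.updateTexture();
  // Hand the SurfaceTexture-provided coordinate transform to the shader so the
  // sampled region matches the underlying buffer.
  program.draw(surfaceTexture.getTextureId(),
               surfaceTexture.getTransformMatrix());
}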
diff --git a/services/camera/virtualcamera/util/MetadataBuilder.cc b/services/camera/virtualcamera/util/MetadataBuilder.cc
index 92a48b9..2bbd58c 100644
--- a/services/camera/virtualcamera/util/MetadataBuilder.cc
+++ b/services/camera/virtualcamera/util/MetadataBuilder.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -31,6 +31,7 @@
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "log/log.h"
 #include "system/camera_metadata.h"
+#include "util/Util.h"
 #include "utils/Errors.h"
 
 namespace android {
@@ -50,12 +51,17 @@
   return to;
 }
 
+template <typename To, typename From>
+std::vector<To> asVectorOf(const From from) {
+  return std::vector<To>({static_cast<To>(from)});
+}
+
 }  // namespace
 
 MetadataBuilder& MetadataBuilder::setSupportedHardwareLevel(
     camera_metadata_enum_android_info_supported_hardware_level_t hwLevel) {
   mEntryMap[ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL] =
-      std::vector<uint8_t>({static_cast<uint8_t>(hwLevel)});
+      asVectorOf<uint8_t>(hwLevel);
   return *this;
 }
 
@@ -63,27 +69,63 @@
   const uint8_t metadataVal = flashAvailable
                                   ? ANDROID_FLASH_INFO_AVAILABLE_TRUE
                                   : ANDROID_FLASH_INFO_AVAILABLE_FALSE;
-  mEntryMap[ANDROID_FLASH_INFO_AVAILABLE] = std::vector<uint8_t>({metadataVal});
+  mEntryMap[ANDROID_FLASH_INFO_AVAILABLE] = asVectorOf<uint8_t>(metadataVal);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFlashState(
+    const camera_metadata_enum_android_flash_state_t flashState) {
+  mEntryMap[ANDROID_FLASH_STATE] = asVectorOf<uint8_t>(flashState);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFlashMode(
+    const camera_metadata_enum_android_flash_mode_t flashMode) {
+  mEntryMap[ANDROID_FLASH_MODE] = asVectorOf<uint8_t>(flashMode);
   return *this;
 }
 
 MetadataBuilder& MetadataBuilder::setLensFacing(
     camera_metadata_enum_android_lens_facing lensFacing) {
-  mEntryMap[ANDROID_LENS_FACING] =
-      std::vector<uint8_t>({static_cast<uint8_t>(lensFacing)});
+  mEntryMap[ANDROID_LENS_FACING] = asVectorOf<uint8_t>(lensFacing);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorReadoutTimestamp(
+    const camera_metadata_enum_android_sensor_readout_timestamp_t
+        sensorReadoutTimestamp) {
+  mEntryMap[ANDROID_SENSOR_READOUT_TIMESTAMP] =
+      asVectorOf<uint8_t>(sensorReadoutTimestamp);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableFocalLengths(
+    const std::vector<float>& focalLengths) {
+  mEntryMap[ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS] = focalLengths;
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFocalLength(float focalLength) {
+  mEntryMap[ANDROID_LENS_FOCAL_LENGTH] = asVectorOf<float>(focalLength);
   return *this;
 }
 
 MetadataBuilder& MetadataBuilder::setSensorOrientation(int32_t sensorOrientation) {
-  mEntryMap[ANDROID_SENSOR_ORIENTATION] =
-      std::vector<int32_t>({sensorOrientation});
+  mEntryMap[ANDROID_SENSOR_ORIENTATION] = asVectorOf<int32_t>(sensorOrientation);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorTimestampSource(
+    const camera_metadata_enum_android_sensor_info_timestamp_source_t
+        timestampSource) {
+  mEntryMap[ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE] =
+      asVectorOf<uint8_t>(timestampSource);
   return *this;
 }
 
 MetadataBuilder& MetadataBuilder::setSensorTimestamp(
     std::chrono::nanoseconds timestamp) {
-  mEntryMap[ANDROID_SENSOR_TIMESTAMP] =
-      std::vector<int64_t>({timestamp.count()});
+  mEntryMap[ANDROID_SENSOR_TIMESTAMP] = asVectorOf<int64_t>(timestamp.count());
   return *this;
 }
 
@@ -95,6 +137,22 @@
   return *this;
 }
 
+MetadataBuilder& MetadataBuilder::setAvailableTestPatternModes(
+    const std::vector<camera_metadata_enum_android_sensor_test_pattern_mode>&
+        testPatternModes) {
+  mEntryMap[ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES] =
+      convertTo<int32_t>(testPatternModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFaceDetectMode(
+    const camera_metadata_enum_android_statistics_face_detect_mode_t
+        faceDetectMode) {
+  mEntryMap[ANDROID_STATISTICS_FACE_DETECT_MODE] =
+      asVectorOf<uint8_t>(faceDetectMode);
+  return *this;
+}
+
 MetadataBuilder& MetadataBuilder::setControlAvailableModes(
     const std::vector<camera_metadata_enum_android_control_mode_t>&
         availableModes) {
@@ -103,6 +161,43 @@
   return *this;
 }
 
+MetadataBuilder& MetadataBuilder::setControlMode(
+    const camera_metadata_enum_android_control_mode_t mode) {
+  mEntryMap[ANDROID_CONTROL_MODE] = asVectorOf<uint8_t>(mode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableSceneModes(
+    const std::vector<camera_metadata_enum_android_control_scene_mode>&
+        availableSceneModes) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_SCENE_MODES] =
+      convertTo<uint8_t>(availableSceneModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableEffects(
+    const std::vector<camera_metadata_enum_android_control_effect_mode>&
+        availableEffects) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_EFFECTS] =
+      convertTo<uint8_t>(availableEffects);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlEffectMode(
+    const camera_metadata_enum_android_control_effect_mode_t effectMode) {
+  mEntryMap[ANDROID_CONTROL_EFFECT_MODE] = asVectorOf<uint8_t>(effectMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableVideoStabilizationModes(
+    const std::vector<
+        camera_metadata_enum_android_control_video_stabilization_mode_t>&
+        videoStabilizationModes) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES] =
+      convertTo<uint8_t>(videoStabilizationModes);
+  return *this;
+}
+
 MetadataBuilder& MetadataBuilder::setControlAfAvailableModes(
     const std::vector<camera_metadata_enum_android_control_af_mode_t>&
         availableModes) {
@@ -113,18 +208,55 @@
 
 MetadataBuilder& MetadataBuilder::setControlAfMode(
     const camera_metadata_enum_android_control_af_mode_t mode) {
-  mEntryMap[ANDROID_CONTROL_AF_MODE] =
-      std::vector<uint8_t>({static_cast<uint8_t>(mode)});
+  mEntryMap[ANDROID_CONTROL_AF_MODE] = asVectorOf<uint8_t>(mode);
   return *this;
 }
 
-MetadataBuilder& MetadataBuilder::setControlAeAvailableFpsRange(
+// See ANDROID_CONTROL_AF_TRIGGER in CameraMetadataTag.aidl.
+MetadataBuilder& MetadataBuilder::setControlAfTrigger(
+    const camera_metadata_enum_android_control_af_trigger_t trigger) {
+  mEntryMap[ANDROID_CONTROL_AF_TRIGGER] = asVectorOf<uint8_t>(trigger);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAvailableFpsRanges(
+    const std::vector<FpsRange>& fpsRanges) {
+  std::vector<int32_t> ranges;
+  ranges.reserve(2 * fpsRanges.size());
+  for (const FpsRange fpsRange : fpsRanges) {
+    ranges.push_back(fpsRange.minFps);
+    ranges.push_back(fpsRange.maxFps);
+  }
+  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES] = std::move(ranges);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeTargetFpsRange(
     const int32_t minFps, const int32_t maxFps) {
-  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES] =
+  mEntryMap[ANDROID_CONTROL_AE_TARGET_FPS_RANGE] =
       std::vector<int32_t>({minFps, maxFps});
   return *this;
 }
 
+MetadataBuilder& MetadataBuilder::setControlAeMode(
+    camera_metadata_enum_android_control_ae_mode_t mode) {
+  mEntryMap[ANDROID_CONTROL_AE_MODE] = asVectorOf<uint8_t>(mode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAvailableModes(
+    const std::vector<camera_metadata_enum_android_control_ae_mode_t>& modes) {
+  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_MODES] = convertTo<uint8_t>(modes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAePrecaptureTrigger(
+    const camera_metadata_enum_android_control_ae_precapture_trigger_t trigger) {
+  mEntryMap[ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER] =
+      asVectorOf<uint8_t>(trigger);
+  return *this;
+}
+
 MetadataBuilder& MetadataBuilder::setControlMaxRegions(int32_t maxAeRegions,
                                                        int32_t maxAwbRegions,
                                                        int32_t maxAfRegions) {
@@ -133,6 +265,54 @@
   return *this;
 }
 
+MetadataBuilder& MetadataBuilder::setControlAvailableAwbModes(
+    const std::vector<camera_metadata_enum_android_control_awb_mode>& awbModes) {
+  mEntryMap[ANDROID_CONTROL_AWB_AVAILABLE_MODES] = convertTo<uint8_t>(awbModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAwbMode(
+    const camera_metadata_enum_android_control_awb_mode awbMode) {
+  mEntryMap[ANDROID_CONTROL_AWB_MODE] = asVectorOf<uint8_t>(awbMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAwbLockAvailable(
+    const bool awbLockAvailable) {
+  const uint8_t lockAvailable = awbLockAvailable
+                                    ? ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE
+                                    : ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
+  mEntryMap[ANDROID_CONTROL_AWB_LOCK_AVAILABLE] =
+      std::vector<uint8_t>({lockAvailable});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAvailableAntibandingModes(
+    const std::vector<camera_metadata_enum_android_control_ae_antibanding_mode_t>&
+        antibandingModes) {
+  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES] =
+      convertTo<uint8_t>(antibandingModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAntibandingMode(
+    const camera_metadata_enum_android_control_ae_antibanding_mode_t
+        antibandingMode) {
+  mEntryMap[ANDROID_CONTROL_AE_ANTIBANDING_MODE] =
+      asVectorOf<uint8_t>(antibandingMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeLockAvailable(
+    const bool aeLockAvailable) {
+  const uint8_t lockAvailable = aeLockAvailable
+                                    ? ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE
+                                    : ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
+  mEntryMap[ANDROID_CONTROL_AE_LOCK_AVAILABLE] =
+      asVectorOf<uint8_t>(lockAvailable);
+  return *this;
+}
+
 MetadataBuilder& MetadataBuilder::setControlAeRegions(
     const std::vector<ControlRegion>& aeRegions) {
   std::vector<int32_t> regions;
@@ -180,13 +360,71 @@
 
 MetadataBuilder& MetadataBuilder::setControlCaptureIntent(
     const camera_metadata_enum_android_control_capture_intent_t intent) {
-  mEntryMap[ANDROID_CONTROL_CAPTURE_INTENT] =
-      std::vector<uint8_t>({static_cast<uint8_t>(intent)});
+  mEntryMap[ANDROID_CONTROL_CAPTURE_INTENT] = asVectorOf<uint8_t>(intent);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setCropRegion(const int32_t x, const int32_t y,
+                                                const int32_t width,
+                                                const int32_t height) {
+  mEntryMap[ANDROID_SCALER_CROP_REGION] =
+      std::vector<int32_t>({x, y, width, height});
   return *this;
 }
 
 MetadataBuilder& MetadataBuilder::setMaxJpegSize(const int32_t size) {
-  mEntryMap[ANDROID_JPEG_MAX_SIZE] = std::vector<int32_t>({size});
+  mEntryMap[ANDROID_JPEG_MAX_SIZE] = asVectorOf<int32_t>(size);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setMaxFrameDuration(
+    const std::chrono::nanoseconds duration) {
+  mEntryMap[ANDROID_SENSOR_INFO_MAX_FRAME_DURATION] =
+      asVectorOf<int64_t>(duration.count());
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setJpegAvailableThumbnailSizes(
+    const std::vector<Resolution>& thumbnailSizes) {
+  std::vector<int32_t> sizes;
+  sizes.reserve(thumbnailSizes.size() * 2);
+  for (const Resolution& resolution : thumbnailSizes) {
+    sizes.push_back(resolution.width);
+    sizes.push_back(resolution.height);
+  }
+  mEntryMap[ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES] = std::move(sizes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setMaxNumberOutputStreams(
+    const int32_t maxRawStreams, const int32_t maxProcessedStreams,
+    const int32_t maxStallStreams) {
+  mEntryMap[ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS] = std::vector<int32_t>(
+      {maxRawStreams, maxProcessedStreams, maxStallStreams});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSyncMaxLatency(
+    camera_metadata_enum_android_sync_max_latency latency) {
+  mEntryMap[ANDROID_SYNC_MAX_LATENCY] = asVectorOf<int32_t>(latency);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setPipelineMaxDepth(const uint8_t maxDepth) {
+  mEntryMap[ANDROID_REQUEST_PIPELINE_MAX_DEPTH] = asVectorOf<uint8_t>(maxDepth);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setPipelineDepth(const uint8_t depth) {
+  mEntryMap[ANDROID_REQUEST_PIPELINE_DEPTH] = asVectorOf<uint8_t>(depth);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableRequestCapabilities(
+    const std::vector<camera_metadata_enum_android_request_available_capabilities_t>&
+        requestCapabilities) {
+  mEntryMap[ANDROID_REQUEST_AVAILABLE_CAPABILITIES] =
+      convertTo<uint8_t>(requestCapabilities);
   return *this;
 }
 
@@ -218,18 +456,18 @@
   }
 
   mEntryMap[ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS] =
-      metadataStreamConfigs;
+      std::move(metadataStreamConfigs);
   mEntryMap[ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS] =
-      metadataMinFrameDurations;
+      std::move(metadataMinFrameDurations);
   mEntryMap[ANDROID_SCALER_AVAILABLE_STALL_DURATIONS] =
-      metadataMinFrameDurations;
+      std::move(metadataStallDurations);
 
   return *this;
 }
 
 MetadataBuilder& MetadataBuilder::setAvailableMaxDigitalZoom(const float maxZoom) {
   mEntryMap[ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM] =
-      std::vector<float>(maxZoom);
+      asVectorOf<float>(maxZoom);
   return *this;
 }
 
@@ -246,6 +484,20 @@
   return *this;
 }
 
+MetadataBuilder& MetadataBuilder::setSensorPixelArraySize(int width,
+                                                          int height) {
+  mEntryMap[ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE] =
+      std::vector<int32_t>({width, height});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorPhysicalSize(float width,
+                                                        float height) {
+  mEntryMap[ANDROID_SENSOR_INFO_PHYSICAL_SIZE] =
+      std::vector<float>({width, height});
+  return *this;
+}
+
 MetadataBuilder& MetadataBuilder::setControlAeCompensationRange(int32_t min,
                                                                 int32_t max) {
   mEntryMap[ANDROID_CONTROL_AE_COMPENSATION_RANGE] =
@@ -256,7 +508,14 @@
 MetadataBuilder& MetadataBuilder::setControlAeCompensationStep(
     const camera_metadata_rational step) {
   mEntryMap[ANDROID_CONTROL_AE_COMPENSATION_STEP] =
-      std::vector<camera_metadata_rational>({step});
+      asVectorOf<camera_metadata_rational>(step);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeExposureCompensation(
+    const int32_t exposureCompensation) {
+  mEntryMap[ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION] =
+      asVectorOf<int32_t>(exposureCompensation);
   return *this;
 }
 
@@ -288,19 +547,23 @@
 }
 
 MetadataBuilder& MetadataBuilder::setAvailableCharacteristicKeys() {
-  std::vector<camera_metadata_tag_t> availableKeys;
-  availableKeys.reserve(mEntryMap.size());
-  for (const auto& [key, _] : mEntryMap) {
-    if (key != ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS) {
-      availableKeys.push_back(key);
-    }
-  }
-  setAvailableCharacteristicKeys(availableKeys);
+  mExtendWithAvailableCharacteristicsKeys = true;
   return *this;
 }
 
 std::unique_ptr<aidl::android::hardware::camera::device::CameraMetadata>
-MetadataBuilder::build() const {
+MetadataBuilder::build() {
+  if (mExtendWithAvailableCharacteristicsKeys) {
+    std::vector<camera_metadata_tag_t> availableKeys;
+    availableKeys.reserve(mEntryMap.size());
+    for (const auto& [key, _] : mEntryMap) {
+      if (key != ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS) {
+        availableKeys.push_back(key);
+      }
+    }
+    setAvailableCharacteristicKeys(availableKeys);
+  }
+
   CameraMetadata metadataHelper;
   for (const auto& entry : mEntryMap) {
     status_t ret = std::visit(
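Note: with the setters above, characteristics and request metadata are still assembled through the fluent builder, and build() now expands ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS lazily when setAvailableCharacteristicKeys() was called. A minimal sketch of a call chain that uses only methods visible in this change; the values are illustrative, not the ones VirtualCameraDevice uses:

#include <memory>

#include "util/MetadataBuilder.h"

using android::companion::virtualcamera::MetadataBuilder;

std::unique_ptr<aidl::android::hardware::camera::device::CameraMetadata>
buildExampleCharacteristics() {
  return MetadataBuilder()
      .setSensorPixelArraySize(640, 480)
      .setControlAeAvailableFpsRanges({{15, 30}, {30, 30}})
      .setMaxNumberOutputStreams(/*maxRawStreams=*/0,
                                 /*maxProcessedStreams=*/2,
                                 /*maxStallStreams=*/1)
      .setAvailableCharacteristicKeys()
      .build();
}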
diff --git a/services/camera/virtualcamera/util/MetadataBuilder.h b/services/camera/virtualcamera/util/MetadataBuilder.h
index d992d31..df99089 100644
--- a/services/camera/virtualcamera/util/MetadataBuilder.h
+++ b/services/camera/virtualcamera/util/MetadataBuilder.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@
 
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "system/camera_metadata.h"
+#include "util/Util.h"
 
 namespace android {
 namespace companion {
@@ -44,7 +45,7 @@
     int32_t format = 0;
     // Minimal frame duration - corresponds to maximal FPS for given format.
     // See ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS in CameraMetadataTag.aidl.
-    std::chrono::nanoseconds minFrameDuration{std::chrono::seconds(1) / 30};
+    std::chrono::nanoseconds minFrameDuration{0};
     // Minimal stall duration.
     // See ANDROID_SCALER_AVAILABLE_STALL_DURATIONS in CameraMetadataTag.aidl.
     std::chrono::nanoseconds minStallDuration{0};
@@ -58,6 +59,16 @@
     int32_t weight = 0;
   };
 
+  struct FpsRange {
+    int32_t minFps;
+    int32_t maxFps;
+
+    bool operator<(const FpsRange& other) const {
+      return maxFps == other.maxFps ? minFps < other.minFps
+                                    : maxFps < other.maxFps;
+    }
+  };
+
   MetadataBuilder() = default;
   ~MetadataBuilder() = default;
 
@@ -69,10 +80,29 @@
   // See ANDROID_FLASH_INFO_AVAILABLE in CameraMetadataTag.aidl.
   MetadataBuilder& setFlashAvailable(bool flashAvailable);
 
+  // See FLASH_STATE in CaptureResult.java.
+  MetadataBuilder& setFlashState(
+      camera_metadata_enum_android_flash_state_t flashState);
+
+  // See FLASH_MODE in CaptureRequest.java.
+  MetadataBuilder& setFlashMode(
+      camera_metadata_enum_android_flash_mode_t flashMode);
+
   // See ANDROID_LENS_FACING in CameraMetadataTag.aidl.
   MetadataBuilder& setLensFacing(
       camera_metadata_enum_android_lens_facing lensFacing);
 
+  // See ANDROID_SENSOR_READOUT_TIMESTAMP in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorReadoutTimestamp(
+      camera_metadata_enum_android_sensor_readout_timestamp_t
+          sensorReadoutTimestamp);
+
+  // See ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableFocalLengths(const std::vector<float>& focalLengths);
+
+  // See ANDROID_LENS_FOCAL_LENGTH in CameraMetadataTag.aidl.
+  MetadataBuilder& setFocalLength(float focalLength);
+
   // See ANDROID_SENSOR_ORIENTATION in CameraMetadataTag.aidl.
   MetadataBuilder& setSensorOrientation(int32_t sensorOrientation);
 
@@ -82,14 +112,33 @@
   // See ANDROID_SENSOR_TIMESTAMP in CameraMetadataTag.aidl.
   MetadataBuilder& setSensorTimestamp(std::chrono::nanoseconds timestamp);
 
+  // See SENSOR_INFO_TIMESTAMP_SOURCE in CameraCharacteristics.java.
+  MetadataBuilder& setSensorTimestampSource(
+      camera_metadata_enum_android_sensor_info_timestamp_source_t timestampSource);
+
   // See ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE in CameraMetadataTag.aidl.
   MetadataBuilder& setSensorActiveArraySize(int x0, int y0, int x1, int y1);
 
+  // See ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorPixelArraySize(int width, int height);
+
+  // See ANDROID_SENSOR_INFO_PHYSICAL_SIZE in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorPhysicalSize(float width, float height);
+
   // See ANDROID_STATISTICS_FACE_DETECT_MODE in CameraMetadataTag.aidl.
   MetadataBuilder& setAvailableFaceDetectModes(
       const std::vector<camera_metadata_enum_android_statistics_face_detect_mode_t>&
           faceDetectMode);
 
+  // See SENSOR_AVAILABLE_TEST_PATTERN_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setAvailableTestPatternModes(
+      const std::vector<camera_metadata_enum_android_sensor_test_pattern_mode>&
+          testPatternModes);
+
+  // See ANDROID_STATISTICS_FACE_DETECT_MODE in CaptureRequest.java.
+  MetadataBuilder& setFaceDetectMode(
+      camera_metadata_enum_android_statistics_face_detect_mode_t faceDetectMode);
+
   // Sets available stream configurations along with corresponding minimal frame
   // durations (corresponding to max fps) and stall durations.
   //
@@ -104,12 +153,60 @@
       const std::vector<camera_metadata_enum_android_control_mode_t>&
           availableModes);
 
+  // See ANDROID_CONTROL_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlMode(
+      camera_metadata_enum_android_control_mode_t mode);
+
+  // See ANDROID_CONTROL_AVAILABLE_SCENE_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableSceneModes(
+      const std::vector<camera_metadata_enum_android_control_scene_mode>&
+          availableSceneModes);
+
+  // See ANDROID_CONTROL_AVAILABLE_EFFECTS in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableEffects(
+      const std::vector<camera_metadata_enum_android_control_effect_mode>&
+          availableEffects);
+
+  // See CONTROL_EFFECT_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlEffectMode(
+      camera_metadata_enum_android_control_effect_mode_t effectMode);
+
+  // See ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableVideoStabilizationModes(
+      const std::vector<
+          camera_metadata_enum_android_control_video_stabilization_mode_t>&
+          videoStabilizationModes);
+
+  // See CONTROL_AE_AVAILABLE_ANTIBANDING_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setControlAeAvailableAntibandingModes(
+      const std::vector<camera_metadata_enum_android_control_ae_antibanding_mode_t>&
+          antibandingModes);
+
+  // See CONTROL_AE_ANTIBANDING_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlAeAntibandingMode(
+      camera_metadata_enum_android_control_ae_antibanding_mode_t antibandingMode);
+
   // See ANDROID_CONTROL_AE_COMPENSATION_RANGE in CameraMetadataTag.aidl.
   MetadataBuilder& setControlAeCompensationRange(int32_t min, int32_t max);
 
   // See ANDROID_CONTROL_AE_COMPENSATION_STEP in CameraMetadataTag.aidl.
   MetadataBuilder& setControlAeCompensationStep(camera_metadata_rational step);
 
+  // See ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeExposureCompensation(int32_t exposureCompensation);
+
+  // See ANDROID_CONTROL_AE_AVAILABLE_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setControlAeAvailableModes(
+      const std::vector<camera_metadata_enum_android_control_ae_mode_t>& modes);
+
+  // See ANDROID_CONTROL_AE_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlAeMode(
+      camera_metadata_enum_android_control_ae_mode_t mode);
+
+  // See ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER in CaptureRequest.java.
+  MetadataBuilder& setControlAePrecaptureTrigger(
+      camera_metadata_enum_android_control_ae_precapture_trigger_t trigger);
+
   // See ANDROID_CONTROL_AF_AVAILABLE_MODES in CameraMetadataTag.aidl.
   MetadataBuilder& setControlAfAvailableModes(
       const std::vector<camera_metadata_enum_android_control_af_mode_t>&
@@ -119,8 +216,16 @@
   MetadataBuilder& setControlAfMode(
       const camera_metadata_enum_android_control_af_mode_t mode);
 
+  // See ANDROID_CONTROL_AF_TRIGGER in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAfTrigger(
+      const camera_metadata_enum_android_control_af_trigger_t trigger);
+
   // See ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAeAvailableFpsRange(int32_t min, int32_t max);
+  MetadataBuilder& setControlAeAvailableFpsRanges(
+      const std::vector<FpsRange>& fpsRanges);
+
+  // See ANDROID_CONTROL_AE_TARGET_FPS_RANGE in CaptureRequest.java.
+  MetadataBuilder& setControlAeTargetFpsRange(int32_t min, int32_t max);
 
   // See ANDROID_CONTROL_CAPTURE_INTENT in CameraMetadataTag.aidl.
   MetadataBuilder& setControlCaptureIntent(
@@ -131,6 +236,20 @@
                                         int32_t maxAwbRegions,
                                         int32_t maxAfRegions);
 
+  // See ANDROID_CONTROL_AWB_AVAILABLE_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableAwbModes(
+      const std::vector<camera_metadata_enum_android_control_awb_mode>& awbModes);
+
+  // See ANDROID_CONTROL_AWB_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlAwbMode(
+      camera_metadata_enum_android_control_awb_mode awb);
+
+  // See ANDROID_CONTROL_AWB_LOCK_AVAILABLE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAwbLockAvailable(bool awbLockAvailable);
+
+  // See ANDROID_CONTROL_AE_LOCK_AVAILABLE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeLockAvailable(bool aeLockAvailable);
+
   // See ANDROID_CONTROL_AE_REGIONS in CameraMetadataTag.aidl.
   MetadataBuilder& setControlAeRegions(
       const std::vector<ControlRegion>& aeRegions);
@@ -139,6 +258,10 @@
   MetadataBuilder& setControlAwbRegions(
       const std::vector<ControlRegion>& awbRegions);
 
+  // See ANDROID_SCALER_CROP_REGION in CaptureRequest.java.
+  MetadataBuilder& setCropRegion(int32_t x, int32_t y, int32_t width,
+                                 int32_t height);
+
   // See ANDROID_CONTROL_AF_REGIONS in CameraMetadataTag.aidl.
   MetadataBuilder& setControlAfRegions(
       const std::vector<ControlRegion>& afRegions);
@@ -148,12 +271,43 @@
   // See ANDROID_JPEG_SIZE in CameraMetadataTag.aidl.
   MetadataBuilder& setMaxJpegSize(int32_t size);
 
+  // See SENSOR_INFO_MAX_FRAME_DURATION in CameraCharacteristics.java.
+  MetadataBuilder& setMaxFrameDuration(std::chrono::nanoseconds duration);
+
+  // See JPEG_AVAILABLE_THUMBNAIL_SIZES in CameraCharacteristics.java.
+  MetadataBuilder& setJpegAvailableThumbnailSizes(
+      const std::vector<Resolution>& thumbnailSizes);
+
+  // The maximum number of output streams of each type that can be
+  // configured and used simultaneously by a camera device.
+  //
+  // See ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS in CameraMetadataTag.aidl.
+  MetadataBuilder& setMaxNumberOutputStreams(int32_t maxRawStreams,
+                                             int32_t maxProcessedStreams,
+                                             int32_t maxStallStreams);
+
+  // See ANDROID_SYNC_MAX_LATENCY in CameraMetadataTag.aidl.
+  MetadataBuilder& setSyncMaxLatency(
+      camera_metadata_enum_android_sync_max_latency maxLatency);
+
+  // See REQUEST_PIPELINE_MAX_DEPTH in CameraCharacteristics.java.
+  MetadataBuilder& setPipelineMaxDepth(uint8_t maxDepth);
+
+  // See REQUEST_PIPELINE_DEPTH in CaptureResult.java.
+  MetadataBuilder& setPipelineDepth(uint8_t depth);
+
   // See ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM in CameraMetadataTag.aidl.
   MetadataBuilder& setAvailableMaxDigitalZoom(const float maxZoom);
 
   // See ANDROID_CONTROL_ZOOM_RATIO_RANGE in CameraMetadataTag.aidl.
   MetadataBuilder& setControlZoomRatioRange(float min, float max);
 
+  // See ANDROID_REQUEST_AVAILABLE_CAPABILITIES in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableRequestCapabilities(
+      const std::vector<
+          camera_metadata_enum_android_request_available_capabilities_t>&
+          requestCapabilities);
+
   // A list of all keys that the camera device has available to use with
   // CaptureRequest.
   //
@@ -179,14 +333,14 @@
       const std::vector<camera_metadata_tag_t>& keys);
 
   // Extends metadata with ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS
-  // containing all previously set tags.
+  // containing all set tags.
   MetadataBuilder& setAvailableCharacteristicKeys();
 
   // Build CameraMetadata instance.
   //
   // Returns nullptr in case something went wrong.
   std::unique_ptr<::aidl::android::hardware::camera::device::CameraMetadata>
-  build() const;
+  build();
 
  private:
   // Maps metadata tags to vectors of values for the given tag.
@@ -195,6 +349,8 @@
                         std::vector<uint8_t>, std::vector<float>,
                         std::vector<camera_metadata_rational_t>>>
       mEntryMap;
+  // Whether build() should extend the metadata with
+  // ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS.
+  bool mExtendWithAvailableCharacteristicsKeys = false;
 };
 
 }  // namespace virtualcamera
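For orientation, a minimal usage sketch of the extended builder; the values are illustrative assumptions, only setters visible in this change are used, and build() now defers the ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS extension until it runs:

// Hypothetical usage, not part of this change.
MetadataBuilder builder;
std::unique_ptr<::aidl::android::hardware::camera::device::CameraMetadata> metadata =
    builder.setSensorOrientation(/*sensorOrientation=*/0)
        .setControlZoomRatioRange(/*min=*/1.0f, /*max=*/4.0f)
        .setAvailableCharacteristicKeys()  // sets mExtendWithAvailableCharacteristicsKeys
        .build();                          // returns nullptr if serialization fails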
diff --git a/services/camera/virtualcamera/util/Util.cc b/services/camera/virtualcamera/util/Util.cc
index df771b1..2d0545d 100644
--- a/services/camera/virtualcamera/util/Util.cc
+++ b/services/camera/virtualcamera/util/Util.cc
@@ -35,6 +35,7 @@
 // TODO(b/301023410) - Query actual max texture size.
 constexpr int kMaxTextureSize = 2048;
 constexpr int kLibJpegDctSize = DCTSIZE;
+constexpr int kMaxFpsUpperLimit = 60;
 
 constexpr std::array<Format, 2> kSupportedFormats{Format::YUV_420_888,
                                                   Format::RGBA_8888};
@@ -54,7 +55,7 @@
 
 // Returns true if specified format is supported for virtual camera input.
 bool isFormatSupportedForInput(const int width, const int height,
-                               const Format format) {
+                               const Format format, const int maxFps) {
   if (!isPixelFormatSupportedForInput(format)) {
     return false;
   }
@@ -71,6 +72,10 @@
     return false;
   }
 
+  if (maxFps <= 0 || maxFps > kMaxFpsUpperLimit) {
+    return false;
+  }
+
   return true;
 }
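A few illustrative calls showing the effect of the new maxFps argument (dimensions and rates are assumed for the example; Format::YUV_420_888 is one of the supported formats listed above and kMaxFpsUpperLimit is 60):

// Hypothetical checks, not part of this change.
bool ok      = isFormatSupportedForInput(640, 480, Format::YUV_420_888, 30);   // accepted
bool tooFast = isFormatSupportedForInput(640, 480, Format::YUV_420_888, 120);  // rejected: > kMaxFpsUpperLimit
bool invalid = isFormatSupportedForInput(640, 480, Format::YUV_420_888, 0);    // rejected: non-positive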
 
diff --git a/services/camera/virtualcamera/util/Util.h b/services/camera/virtualcamera/util/Util.h
index a73c99b..9f81bb1 100644
--- a/services/camera/virtualcamera/util/Util.h
+++ b/services/camera/virtualcamera/util/Util.h
@@ -50,7 +50,28 @@
 // Returns true if specified format is supported for virtual camera input.
 bool isFormatSupportedForInput(
     int width, int height,
-    ::aidl::android::companion::virtualcamera::Format format);
+    ::aidl::android::companion::virtualcamera::Format format, int maxFps);
+
+// Representation of resolution / size.
+struct Resolution {
+  Resolution(const int w, const int h) : width(w), height(h) {
+  }
+
+  // Order by increasing pixel count, and by width for same pixel count.
+  bool operator<(const Resolution& other) const {
+    const int pixCount = width * height;
+    const int otherPixCount = other.width * other.height;
+    return pixCount == otherPixCount ? width < other.width
+                                     : pixCount < otherPixCount;
+  }
+
+  bool operator==(const Resolution& other) const {
+    return width == other.width && height == other.height;
+  }
+
+  const int width;
+  const int height;
+};
 
 }  // namespace virtualcamera
 }  // namespace companion
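A small worked example of the ordering this comparator defines (sizes assumed for illustration): resolutions sort by ascending pixel count, with width breaking ties, so an ordered container yields them smallest-first.

// Hypothetical usage, not part of this change.
#include <set>

// 320x240 = 76800 px; 480x480 and 640x360 = 230400 px each, so the narrower
// width (480) orders first; 640x480 = 307200 px.
std::set<Resolution> sizes{Resolution(640, 480), Resolution(320, 240),
                           Resolution(640, 360), Resolution(480, 480)};
// Iteration order: 320x240, 480x480, 640x360, 640x480.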
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index 630a436..7dc445b 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -549,7 +549,7 @@
 
     int slot = 1;
     std::stringstream ss;
-    ss << "AudioPowerUsage:\n";
+    ss << "AudioPowerUsage interval " << mIntervalHours << " hours:\n";
     for (const auto &item : mItems) {
         if (slot >= limit - 1) {
             ss << "-- AudioPowerUsage may be truncated!\n";
diff --git a/services/mediametrics/include/mediametricsservice/TimedAction.h b/services/mediametrics/include/mediametricsservice/TimedAction.h
index c7ef585..8b53ded 100644
--- a/services/mediametrics/include/mediametricsservice/TimedAction.h
+++ b/services/mediametrics/include/mediametricsservice/TimedAction.h
@@ -25,6 +25,12 @@
 namespace android::mediametrics {
 
 class TimedAction {
+    // Use system_clock instead of steady_clock to include suspend time.
+    using TimerClock = std::chrono::system_clock;
+
+    // Define the wakeup granularity to prevent delayed events if the
+    // device is suspended.
+    static constexpr auto kWakeupInterval = std::chrono::minutes(3);
 public:
     TimedAction() : mThread{[this](){threadLoop();}} {}
 
@@ -35,7 +41,7 @@
     // TODO: return a handle for cancelling the action?
     template <typename T> // T is in units of std::chrono::duration.
     void postIn(const T& time, std::function<void()> f) {
-        postAt(std::chrono::steady_clock::now() + time, f);
+        postAt(TimerClock::now() + time, f);
     }
 
     template <typename T> // T is in units of std::chrono::time_point
@@ -75,16 +81,21 @@
     void threadLoop() NO_THREAD_SAFETY_ANALYSIS { // thread safety doesn't cover unique_lock
         std::unique_lock l(mLock);
         while (!mQuit) {
-            auto sleepUntilTime = std::chrono::time_point<std::chrono::steady_clock>::max();
+            auto sleepUntilTime = std::chrono::time_point<TimerClock>::max();
             if (!mMap.empty()) {
                 sleepUntilTime = mMap.begin()->first;
-                if (sleepUntilTime <= std::chrono::steady_clock::now()) {
+                const auto now = TimerClock::now();
+                if (sleepUntilTime <= now) {
                     auto node = mMap.extract(mMap.begin()); // removes from mMap.
                     l.unlock();
                     node.mapped()();
                     l.lock();
                     continue;
                 }
+                // Bionic uses CLOCK_MONOTONIC for its pthread_mutex regardless of the
+                // REALTIME specification, so cap the sleep at kWakeupInterval to ensure
+                // a minimum wakeup granularity while suspended.
+                sleepUntilTime = std::min(sleepUntilTime, now + kWakeupInterval);
             }
             mCondition.wait_until(l, sleepUntilTime);
         }
@@ -93,7 +104,7 @@
     mutable std::mutex mLock;
     std::condition_variable mCondition GUARDED_BY(mLock);
     bool mQuit GUARDED_BY(mLock) = false;
-    std::multimap<std::chrono::time_point<std::chrono::steady_clock>, std::function<void()>>
+    std::multimap<std::chrono::time_point<TimerClock>, std::function<void()>>
             mMap GUARDED_BY(mLock); // multiple functions could execute at the same time.
 
     // needs to be initialized after the variables above, done in constructor initializer list.
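In short, deadlines are now measured against system_clock (which advances during suspend), while the condition-variable wait is capped so the loop re-checks wall-clock time at least once per kWakeupInterval. A hedged sketch of that clamping step as a standalone helper (the helper name is hypothetical):

// Illustrative helper, not part of this change.
#include <algorithm>
#include <chrono>

using TimerClock = std::chrono::system_clock;
static constexpr auto kWakeupInterval = std::chrono::minutes(3);

// Returns the next wakeup time: never later than kWakeupInterval from now,
// even if the earliest queued action is hours away, so a suspend/resume
// cycle delays dispatch by at most one interval rather than by the full
// suspend duration.
static TimerClock::time_point clampWakeup(TimerClock::time_point earliestDeadline) {
    return std::min(earliestDeadline, TimerClock::now() + kWakeupInterval);
}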
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 6b48075..5b4fca9 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -37,6 +37,8 @@
 #include "AAudioServiceEndpointPlay.h"
 #include "AAudioServiceEndpointMMAP.h"
 
+#include <com_android_media_aaudio.h>
+
 #define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
 #define AAUDIO_SAMPLE_RATE_DEFAULT    48000
 
@@ -148,9 +150,15 @@
 
         // Try other formats if the config from APM is the same as our current config.
         // Some HALs may report their format support incorrectly.
-        if ((previousConfig.format == config.format) &&
-                (previousConfig.sample_rate == config.sample_rate)) {
-            config.format = getNextFormatToTry(config.format);
+        if (previousConfig.format == config.format) {
+            if (previousConfig.sample_rate == config.sample_rate) {
+                config.format = getNextFormatToTry(config.format);
+            } else if (!com::android::media::aaudio::sample_rate_conversion()) {
+                ALOGI("%s() - AAudio SRC feature not enabled, different rates! %d != %d",
+                      __func__, previousConfig.sample_rate, config.sample_rate);
+                result = AAUDIO_ERROR_INVALID_RATE;
+                break;
+            }
         }
 
         ALOGD("%s() %#x %d failed, perhaps due to format or sample rate. Try again with %#x %d",
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index d9e7e2b..dc70c79 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -75,11 +75,7 @@
                         this, getState());
 
     // Stop the command thread before destroying.
-    if (mThreadEnabled) {
-        mThreadEnabled = false;
-        mCommandQueue.stopWaiting();
-        mCommandThread.stop();
-    }
+    stopCommandThread();
 }
 
 std::string AAudioServiceStreamBase::dumpHeader() {
@@ -194,26 +190,27 @@
 
 error:
     closeAndClear();
-    mThreadEnabled = false;
-    mCommandQueue.stopWaiting();
-    mCommandThread.stop();
+    stopCommandThread();
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
     aaudio_result_t result = sendCommand(CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    if (result == AAUDIO_ERROR_ALREADY_CLOSED) {
+        // AAUDIO_ERROR_ALREADY_CLOSED is not really an error; it just indicates that the
+        // stream has already been closed. In that case, there is no need to close it again.
+        ALOGD("The stream(%d) is already closed", mHandle);
+        return AAUDIO_OK;
+    }
 
-    // Stop the command thread as the stream is closed.
-    mThreadEnabled = false;
-    mCommandQueue.stopWaiting();
-    mCommandThread.stop();
+    stopCommandThread();
 
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close_l() {
     if (getState() == AAUDIO_STREAM_STATE_CLOSED) {
-        return AAUDIO_OK;
+        return AAUDIO_ERROR_ALREADY_CLOSED;
     }
 
     // This will stop the stream, just in case it was not already stopped.
@@ -766,3 +763,11 @@
         .record();
     return result;
 }
+
+void AAudioServiceStreamBase::stopCommandThread() {
+    bool threadEnabled = true;
+    if (mThreadEnabled.compare_exchange_strong(threadEnabled, false)) {
+        mCommandQueue.stopWaiting();
+        mCommandThread.stop();
+    }
+}
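The helper centralizes the three former call sites, and the compare_exchange_strong makes it idempotent: only the caller that flips mThreadEnabled from true to false performs the teardown, so a destructor racing with close() cannot stop the command thread twice. A minimal hedged sketch of the same pattern in isolation:

// Illustrative pattern, not part of this change.
#include <atomic>

std::atomic_bool enabled{true};

void stopOnce() {
    bool expected = true;
    // The first caller wins the exchange and performs the teardown; later
    // callers observe 'false' and return without touching the thread.
    if (enabled.compare_exchange_strong(expected, false)) {
        // stop the queue / join the thread here
    }
}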
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index d5061b3..96a6d44 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -360,7 +360,7 @@
         EXIT_STANDBY,
     };
     AAudioThread            mCommandThread;
-    std::atomic<bool>       mThreadEnabled{false};
+    std::atomic_bool        mThreadEnabled{false};
     AAudioCommandQueue      mCommandQueue;
 
     int32_t                 mFramesPerBurst = 0;
@@ -400,6 +400,8 @@
                                 bool waitForReply = false,
                                 int64_t timeoutNanos = 0);
 
+    void stopCommandThread();
+
     aaudio_result_t closeAndClear();
 
     /**
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 9fe06b7..12ce17f 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -97,6 +97,7 @@
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "packagemanager_aidl-cpp",
+        "com.android.media.aaudio-aconfig-cc",
     ],
 
     static_libs: [
diff --git a/services/oboeservice/fuzzer/Android.bp b/services/oboeservice/fuzzer/Android.bp
index 0230935..c130b12 100644
--- a/services/oboeservice/fuzzer/Android.bp
+++ b/services/oboeservice/fuzzer/Android.bp
@@ -51,6 +51,7 @@
         "aaudio-aidl-cpp",
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
+        "com.android.media.aaudio-aconfig-cc",
     ],
     static_libs: [
         "libaaudioservice",