Merge "avif: Use grid size to query for codec"
diff --git a/MainlineFiles.cfg b/MainlineFiles.cfg
index 4e1d3a2..bf39c1a 100644
--- a/MainlineFiles.cfg
+++ b/MainlineFiles.cfg
@@ -30,6 +30,7 @@
 media/codec2/hidl/
 media/codec2/sfplugin/utils/
 media/codec2/vndk/
+media/libstagefright/data/media_codecs_sw.xml
 media/module/
 services/mediacodec/
 
diff --git a/camera/CameraSessionStats.cpp b/camera/CameraSessionStats.cpp
index d1aa36a..0706ac1 100644
--- a/camera/CameraSessionStats.cpp
+++ b/camera/CameraSessionStats.cpp
@@ -130,6 +130,12 @@
         return err;
     }
 
+    int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED;
+    if ((err = parcel->readInt32(&colorSpace)) != OK) {
+        ALOGE("%s: Failed to read color space from parcel", __FUNCTION__);
+        return err;
+    }
+
     mWidth = width;
     mHeight = height;
     mFormat = format;
@@ -146,6 +152,7 @@
     mHistogramCounts = std::move(histogramCounts);
     mDynamicRangeProfile = dynamicRangeProfile;
     mStreamUseCase = streamUseCase;
+    mColorSpace = colorSpace;
 
     return OK;
 }
@@ -238,6 +245,11 @@
         return err;
     }
 
+    if ((err = parcel->writeInt32(mColorSpace)) != OK) {
+        ALOGE("%s: Failed to write color space", __FUNCTION__);
+        return err;
+    }
+
     return OK;
 }
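Both readFromParcel() and writeToParcel() above gain the new colorSpace field. Parcel data is positional, so the read side must consume fields in exactly the order the write side produced them. The following standalone sketch is illustrative only (it is not part of the patch and does not mirror the full CameraStreamStats layout); it shows the invariant with plain android::Parcel calls.

```cpp
#include <binder/Parcel.h>

// Sketch of the positional Parcel contract; field names are illustrative.
void parcelOrderingSketch() {
    android::Parcel p;
    p.writeInt64(/* dynamicRangeProfile */ 0);
    p.writeInt64(/* streamUseCase */ 0);
    p.writeInt32(/* colorSpace */ -1);   // new field appended after the existing ones

    p.setDataPosition(0);                // rewind before reading back
    int64_t dynamicRangeProfile, streamUseCase;
    int32_t colorSpace;
    p.readInt64(&dynamicRangeProfile);
    p.readInt64(&streamUseCase);
    p.readInt32(&colorSpace);            // reads must match the write order exactly
}
```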
 
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 11d4960..d50566d 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -26,8 +26,8 @@
 #include <system/camera_metadata.h>
 #include <utils/String8.h>
 
-namespace android {
 
+namespace android {
 
 const int OutputConfiguration::INVALID_ROTATION = -1;
 const int OutputConfiguration::INVALID_SET_ID = -1;
@@ -81,6 +81,10 @@
     return mDynamicRangeProfile;
 }
 
+int32_t OutputConfiguration::getColorSpace() const {
+    return mColorSpace;
+}
+
 int64_t OutputConfiguration::getStreamUseCase() const {
     return mStreamUseCase;
 }
@@ -103,6 +107,7 @@
         mIsShared(false),
         mIsMultiResolution(false),
         mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
+        mColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED),
         mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT),
         mTimestampBase(TIMESTAMP_BASE_DEFAULT),
         mMirrorMode(MIRROR_MODE_AUTO) {
@@ -191,6 +196,11 @@
         ALOGE("%s: Failed to read surface dynamic range profile flag from parcel", __FUNCTION__);
         return err;
     }
+    int32_t colorSpace;
+    if ((err = parcel->readInt32(&colorSpace)) != OK) {
+        ALOGE("%s: Failed to read surface color space flag from parcel", __FUNCTION__);
+        return err;
+    }
 
     int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
     if ((err = parcel->readInt64(&streamUseCase)) != OK) {
@@ -230,6 +240,7 @@
 
     mSensorPixelModesUsed = std::move(sensorPixelModesUsed);
     mDynamicRangeProfile = dynamicProfile;
+    mColorSpace = colorSpace;
 
     ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
           " physicalCameraId = %s, isMultiResolution = %d, streamUseCase = %" PRId64
@@ -252,6 +263,7 @@
     mPhysicalCameraId = physicalId;
     mIsMultiResolution = false;
     mDynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+    mColorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED;
     mStreamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
     mTimestampBase = TIMESTAMP_BASE_DEFAULT;
     mMirrorMode = MIRROR_MODE_AUTO;
@@ -265,6 +277,7 @@
     mWidth(width), mHeight(height), mIsDeferred(false), mIsShared(isShared),
     mPhysicalCameraId(physicalCameraId), mIsMultiResolution(false),
     mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
+    mColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED),
     mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT),
     mTimestampBase(TIMESTAMP_BASE_DEFAULT),
     mMirrorMode(MIRROR_MODE_AUTO) { }
@@ -317,6 +330,9 @@
     err = parcel->writeInt64(mDynamicRangeProfile);
     if (err != OK) return err;
 
+    err = parcel->writeInt32(mColorSpace);
+    if (err != OK) return err;
+
     err = parcel->writeInt64(mStreamUseCase);
     if (err != OK) return err;
 
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index 094a3c1..27ebb7a 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -43,7 +43,7 @@
         "android.hardware.camera.provider@2.5",
         "android.hardware.camera.provider@2.6",
         "android.hardware.camera.provider@2.7",
-        "android.hardware.camera.provider-V1-ndk",
+        "android.hardware.camera.provider-V2-ndk",
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.4",
diff --git a/camera/include/camera/CameraSessionStats.h b/camera/include/camera/CameraSessionStats.h
index aaa88b2..90ee924 100644
--- a/camera/include/camera/CameraSessionStats.h
+++ b/camera/include/camera/CameraSessionStats.h
@@ -67,22 +67,26 @@
     int64_t mDynamicRangeProfile;
     // Stream use case
     int64_t mStreamUseCase;
+    // Color space
+    int32_t mColorSpace;
 
     CameraStreamStats() :
             mWidth(0), mHeight(0), mFormat(0), mMaxPreviewFps(0), mDataSpace(0), mUsage(0),
             mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
             mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
             mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
-            mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {}
+            mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT),
+            mColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED) {}
     CameraStreamStats(int width, int height, int format, float maxPreviewFps, int dataSpace,
             int64_t usage, int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile,
-            int streamUseCase)
+            int streamUseCase, int32_t colorSpace)
             : mWidth(width), mHeight(height), mFormat(format), mMaxPreviewFps(maxPreviewFps),
               mDataSpace(dataSpace), mUsage(usage), mRequestCount(0), mErrorCount(0),
               mStartLatencyMs(0), mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
               mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
               mDynamicRangeProfile(dynamicRangeProfile),
-              mStreamUseCase(streamUseCase) {}
+              mStreamUseCase(streamUseCase),
+              mColorSpace(colorSpace) {}
 
     virtual status_t readFromParcel(const android::Parcel* parcel) override;
     virtual status_t writeToParcel(android::Parcel* parcel) const override;
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index b7c7f7f..a713b40 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -61,6 +61,7 @@
     int                        getWidth() const;
     int                        getHeight() const;
     int64_t                    getDynamicRangeProfile() const;
+    int32_t                    getColorSpace() const;
     bool                       isDeferred() const;
     bool                       isShared() const;
     String16                   getPhysicalCameraId() const;
@@ -111,6 +112,7 @@
                 mIsMultiResolution == other.mIsMultiResolution &&
                 sensorPixelModesUsedEqual(other) &&
                 mDynamicRangeProfile == other.mDynamicRangeProfile &&
+                mColorSpace == other.mColorSpace &&
                 mStreamUseCase == other.mStreamUseCase &&
                 mTimestampBase == other.mTimestampBase &&
                 mMirrorMode == other.mMirrorMode);
@@ -153,6 +155,9 @@
         if (mDynamicRangeProfile != other.mDynamicRangeProfile) {
             return mDynamicRangeProfile < other.mDynamicRangeProfile;
         }
+        if (mColorSpace != other.mColorSpace) {
+            return mColorSpace < other.mColorSpace;
+        }
         if (mStreamUseCase != other.mStreamUseCase) {
             return mStreamUseCase < other.mStreamUseCase;
         }
@@ -187,6 +192,7 @@
     bool                       mIsMultiResolution;
     std::vector<int32_t>       mSensorPixelModesUsed;
     int64_t                    mDynamicRangeProfile;
+    int32_t                    mColorSpace;
     int64_t                    mStreamUseCase;
     int                        mTimestampBase;
     int                        mMirrorMode;
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 05124c0..1018b41 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -537,6 +537,7 @@
         case ACAMERA_CONTROL_ENABLE_ZSL:
         case ACAMERA_CONTROL_EXTENDED_SCENE_MODE:
         case ACAMERA_CONTROL_ZOOM_RATIO:
+        case ACAMERA_CONTROL_SETTINGS_OVERRIDE:
         case ACAMERA_EDGE_MODE:
         case ACAMERA_FLASH_MODE:
         case ACAMERA_HOT_PIXEL_MODE:
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index 729182e..7388678 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -209,7 +209,8 @@
  * Query the capabilities of a camera device. These capabilities are
  * immutable for a given camera.
  *
- * <p>See {@link ACameraMetadata} document and {@link NdkCameraMetadataTags.h} for more details.</p>
+ * <p>See {@link ACameraMetadata} document and <a href="https://cs.android.com/android/platform/superproject/+/master:frameworks/av/camera/ndk/include/camera/NdkCameraMetadataTags.h">NdkCameraMetadataTags.h</a>
+ * for more details.</p>
  *
  * <p>The caller must call {@link ACameraMetadata_free} to free the memory of the output
  * characteristics.</p>
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index b331d50..a9f53dd 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -96,9 +96,12 @@
     /**
      * The tag identifying the entry.
      *
-     * <p> It is one of the values defined in {@link NdkCameraMetadataTags.h}, and defines how the
+     * <p> It is one of the values defined in
+     * <a href="https://cs.android.com/android/platform/superproject/+/master:frameworks/av/camera/ndk/include/camera/NdkCameraMetadataTags.h">NdkCameraMetadataTags.h</a>
+     * , and defines how the
      * entry should be interpreted and which parts of the API provide it.
-     * See {@link NdkCameraMetadataTags.h} for more details. </p>
+     * See <a href="https://cs.android.com/android/platform/superproject/+/master:frameworks/av/camera/ndk/include/camera/NdkCameraMetadataTags.h">NdkCameraMetadataTags.h</a>
+     * for more details. </p>
      */
     uint32_t tag;
 
@@ -141,9 +144,11 @@
     /**
      * The tag identifying the entry.
      *
-     * <p> It is one of the values defined in {@link NdkCameraMetadataTags.h}, and defines how the
+     * <p> It is one of the values defined in <a href="https://cs.android.com/android/platform/superproject/+/master:frameworks/av/camera/ndk/include/camera/NdkCameraMetadataTags.h">NdkCameraMetadataTags.h</a>
+     * , and defines how the
      * entry should be interpreted and which parts of the API provide it.
-     * See {@link NdkCameraMetadataTags.h} for more details. </p>
+     * See <a href="https://cs.android.com/android/platform/superproject/+/master:frameworks/av/camera/ndk/include/camera/NdkCameraMetadataTags.h">NdkCameraMetadataTags.h</a>
+     * for more details. </p>
      */
     uint32_t tag;
 
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0d156a5..def883b 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -2044,6 +2044,105 @@
      */
     ACAMERA_CONTROL_ZOOM_RATIO =                                // float
             ACAMERA_CONTROL_START + 47,
+    /**
+     * <p>The desired CaptureRequest settings override with which certain keys are
+     * applied earlier so that they can take effect sooner.</p>
+     *
+     * <p>Type: int32 (acamera_metadata_enum_android_control_settings_override_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul></p>
+     *
+     * <p>There are some CaptureRequest keys which can be applied earlier than others
+     * when controls within a CaptureRequest aren't required to take effect at the same time.
+     * One such example is zoom. Zoom can be applied at a later stage of the camera pipeline.
+     * As soon as the camera device receives the CaptureRequest, it can apply the requested
+     * zoom value onto an earlier request that's already in the pipeline, thus improving zoom
+     * latency.</p>
+     * <p>This key's value in the capture result reflects whether the controls for this capture
+     * are overridden "by" a newer request. This means that if a capture request turns on
+     * settings override, the capture result of an earlier request will contain the key value
+     * of ZOOM. On the other hand, if a capture request has settings override turned on,
+     * but all newer requests have it turned off, the key's value in the capture result will
+     * be OFF because this capture isn't overridden by a newer capture. In the two examples
+     * below, the capture results columns illustrate the settingsOverride values in different
+     * scenarios.</p>
+     * <p>Assuming the zoom settings override can speed up by 1 frame, the example below
+     * illustrates the speed-up at the start of a capture session:</p>
+     * <pre><code>Camera session created
+     * Request 1 (zoom=1.0x, override=ZOOM) -&gt;
+     * Request 2 (zoom=1.2x, override=ZOOM) -&gt;
+     * Request 3 (zoom=1.4x, override=ZOOM) -&gt;  Result 1 (zoom=1.2x, override=ZOOM)
+     * Request 4 (zoom=1.6x, override=ZOOM) -&gt;  Result 2 (zoom=1.4x, override=ZOOM)
+     * Request 5 (zoom=1.8x, override=ZOOM) -&gt;  Result 3 (zoom=1.6x, override=ZOOM)
+     *                                      -&gt;  Result 4 (zoom=1.8x, override=ZOOM)
+     *                                      -&gt;  Result 5 (zoom=1.8x, override=OFF)
+     * </code></pre>
+     * <p>The application can turn on settings override and use zoom as normal. The example
+     * shows that the later zoom values (1.2x, 1.4x, 1.6x, and 1.8x) overwrite the zoom
+     * values (1.0x, 1.2x, 1.4x, and 1.6x) of earlier requests (#1, #2, #3, and #4).</p>
+     * <p>The application must make sure the settings override doesn't interfere with user
+     * journeys requiring simultaneous application of all controls in CaptureRequest on the
+     * requested output targets. For example, if the application takes a still capture using
+     * CameraCaptureSession#capture, and the repeating request immediately sets a different
+     * zoom value using override, the inflight still capture could have its zoom value
+     * overwritten unexpectedly.</p>
+     * <p>So the application is strongly recommended to turn off settingsOverride when taking
+     * still/burst captures, and turn it back on when there is only repeating viewfinder
+     * request and no inflight still/burst captures.</p>
+     * <p>Below is the example demonstrating the transitions in and out of the
+     * settings override:</p>
+     * <pre><code>Request 1 (zoom=1.0x, override=OFF)
+     * Request 2 (zoom=1.2x, override=OFF)
+     * Request 3 (zoom=1.4x, override=ZOOM)  -&gt; Result 1 (zoom=1.0x, override=OFF)
+     * Request 4 (zoom=1.6x, override=ZOOM)  -&gt; Result 2 (zoom=1.4x, override=ZOOM)
+     * Request 5 (zoom=1.8x, override=OFF)   -&gt; Result 3 (zoom=1.6x, override=ZOOM)
+     *                                       -&gt; Result 4 (zoom=1.6x, override=OFF)
+     *                                       -&gt; Result 5 (zoom=1.8x, override=OFF)
+     * </code></pre>
+     * <p>This example shows that:</p>
+     * <ul>
+     * <li>The application "ramps in" settings override by setting the control to ZOOM.
+     * In the example, request #3 enables zoom settings override. Because the camera device
+     * can speed up applying zoom by 1 frame, the output of request #2 has 1.4x zoom, the
+     * value specified in request #3.</li>
+     * <li>The application "ramps out" of settings override by setting the control to OFF. In
+     * the example, request #5 changes the override to OFF. Because request #4's zoom
+     * takes effect in result #3, result #4's zoom remains the same until the new value takes
+     * effect in result #5.</li>
+     * </ul>
+     */
+    ACAMERA_CONTROL_SETTINGS_OVERRIDE =                         // int32 (acamera_metadata_enum_android_control_settings_override_t)
+            ACAMERA_CONTROL_START + 49,
+    /**
+     * <p>List of available settings overrides supported by the camera device that can
+     * be used to speed up certain controls.</p>
+     *
+     * <p>Type: int32[n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>When not all controls within a CaptureRequest are required to take effect
+     * at the same time on the outputs, the camera device may apply certain request keys sooner
+     * to improve latency. This list contains such supported settings overrides. Each settings
+     * override corresponds to a set of CaptureRequest keys that can be sped up when applying.</p>
+     * <p>A supported settings override can be passed in via
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.html#CONTROL_SETTINGS_OVERRIDE">CaptureRequest#CONTROL_SETTINGS_OVERRIDE</a>, and the
+     * CaptureRequest keys corresponding to the override are applied as soon as possible, not
+     * bound by per-frame synchronization. See ACAMERA_CONTROL_SETTINGS_OVERRIDE for the
+     * CaptureRequest keys for each override.</p>
+     * <p>OFF is always included in this list.</p>
+     *
+     * @see ACAMERA_CONTROL_SETTINGS_OVERRIDE
+     */
+    ACAMERA_CONTROL_AVAILABLE_SETTINGS_OVERRIDES =              // int32[n]
+            ACAMERA_CONTROL_START + 50,
     ACAMERA_CONTROL_END,
 
     /**
@@ -3520,6 +3619,26 @@
      */
     ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP =      // int64[n*3] (acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t)
             ACAMERA_REQUEST_START + 19,
+    /**
+     * <p>A list of all possible color space profiles supported by a camera device.</p>
+     *
+     * <p>Type: int64[n*3] (acamera_metadata_enum_android_request_available_color_space_profiles_map_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>A color space profile is a combination of a color space, an image format, and a dynamic range
+     * profile. If a camera does not support the
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT">CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT</a>
+     * capability, the dynamic range profile will always be
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/params/DynamicRangeProfiles.html#STANDARD">DynamicRangeProfiles#STANDARD</a>. Camera clients can
+     * use <a href="https://developer.android.com/reference/android/hardware/camera2/params/SessionConfiguration.html#setColorSpace">SessionConfiguration#setColorSpace</a> to select
+     * a color space.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP =        // int64[n*3] (acamera_metadata_enum_android_request_available_color_space_profiles_map_t)
+            ACAMERA_REQUEST_START + 21,
     ACAMERA_REQUEST_END,
 
     /**
@@ -8475,6 +8594,40 @@
 
 } acamera_metadata_enum_android_control_extended_scene_mode_t;
 
+// ACAMERA_CONTROL_SETTINGS_OVERRIDE
+typedef enum acamera_metadata_enum_acamera_control_settings_override {
+    /**
+     * <p>No keys are applied sooner than the other keys when applying CaptureRequest
+     * settings to the camera device. This is the default value.</p>
+     */
+    ACAMERA_CONTROL_SETTINGS_OVERRIDE_OFF                            = 0,
+
+    /**
+     * <p>Zoom related keys are applied sooner than the other keys in the CaptureRequest. The
+     * zoom related keys are:</p>
+     * <ul>
+     * <li>ACAMERA_CONTROL_ZOOM_RATIO</li>
+     * <li>ACAMERA_SCALER_CROP_REGION</li>
+     * <li>ACAMERA_CONTROL_AE_REGIONS</li>
+     * <li>ACAMERA_CONTROL_AWB_REGIONS</li>
+     * <li>ACAMERA_CONTROL_AF_REGIONS</li>
+     * </ul>
+     * <p>Even though ACAMERA_CONTROL_AE_REGIONS, ACAMERA_CONTROL_AWB_REGIONS,
+     * and ACAMERA_CONTROL_AF_REGIONS are not directly zoom related, applications
+     * typically scale these regions together with ACAMERA_SCALER_CROP_REGION to have a
+     * consistent mapping within the current field of view. In this aspect, they are
+     * related to ACAMERA_SCALER_CROP_REGION and ACAMERA_CONTROL_ZOOM_RATIO.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_REGIONS
+     * @see ACAMERA_CONTROL_AF_REGIONS
+     * @see ACAMERA_CONTROL_AWB_REGIONS
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
+     * @see ACAMERA_SCALER_CROP_REGION
+     */
+    ACAMERA_CONTROL_SETTINGS_OVERRIDE_ZOOM                           = 1,
+
+} acamera_metadata_enum_android_control_settings_override_t;
+
 
 
 // ACAMERA_EDGE_MODE
@@ -9448,6 +9601,99 @@
 
 } acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t;
 
+// ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP
+typedef enum acamera_metadata_enum_acamera_request_available_color_space_profiles_map {
+    /**
+     * <p>Default value, when not explicitly specified. The Camera device will choose the color
+     * space to employ.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED   = -1,
+
+    /**
+     * <p>RGB color space sRGB standardized as IEC 61966-2.1:1999.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_SRGB          = 0,
+
+    /**
+     * <p>RGB color space sRGB standardized as IEC 61966-2.1:1999.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_LINEAR_SRGB   = 1,
+
+    /**
+     * <p>RGB color space scRGB-nl standardized as IEC 61966-2-2:2003.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_EXTENDED_SRGB = 2,
+
+    /**
+     * <p>RGB color space scRGB standardized as IEC 61966-2-2:2003.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_LINEAR_EXTENDED_SRGB
+                                                                      = 3,
+
+    /**
+     * <p>RGB color space BT.709 standardized as Rec. ITU-R BT.709-5.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_BT709         = 4,
+
+    /**
+     * <p>RGB color space BT.2020 standardized as Rec. ITU-R BT.2020-1.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_BT2020        = 5,
+
+    /**
+     * <p>RGB color space DCI-P3 standardized as SMPTE RP 431-2-2007.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_DCI_P3        = 6,
+
+    /**
+     * <p>RGB color space Display P3 based on SMPTE RP 431-2-2007 and IEC 61966-2.1:1999.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_DISPLAY_P3    = 7,
+
+    /**
+     * <p>RGB color space NTSC, 1953 standard.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_NTSC_1953     = 8,
+
+    /**
+     * <p>RGB color space SMPTE C.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_SMPTE_C       = 9,
+
+    /**
+     * <p>RGB color space Adobe RGB (1998).</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_ADOBE_RGB     = 10,
+
+    /**
+     * <p>RGB color space ProPhoto RGB standardized as ROMM RGB ISO 22028-2:2013.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_PRO_PHOTO_RGB = 11,
+
+    /**
+     * <p>RGB color space ACES standardized as SMPTE ST 2065-1:2012.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_ACES          = 12,
+
+    /**
+     * <p>RGB color space ACEScg standardized as Academy S-2014-004.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_ACESCG        = 13,
+
+    /**
+     * <p>XYZ color space CIE XYZ. This color space assumes standard illuminant D50 as its white
+     * point.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_CIE_XYZ       = 14,
+
+    /**
+     * <p>Lab color space CIE L<em>a</em>b*. This color space uses CIE XYZ D50 as a profile conversion
+     * space.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_CIE_LAB       = 15,
+
+} acamera_metadata_enum_android_request_available_color_space_profiles_map_t;
+
 
 // ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
 typedef enum acamera_metadata_enum_acamera_scaler_available_stream_configurations {
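The new ACAMERA_CONTROL_SETTINGS_OVERRIDE, ACAMERA_CONTROL_AVAILABLE_SETTINGS_OVERRIDES, and ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP tags are consumed through the existing NDK metadata APIs. Below is a minimal sketch (not part of the patch) of how a client might check for zoom override support and enable it on a request; error handling is trimmed, and `chars` and `request` are assumed to come from ACameraManager_getCameraCharacteristics() and ACameraDevice_createCaptureRequest().

```cpp
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>
#include <camera/NdkCaptureRequest.h>

// Sketch only: returns true if the zoom settings override was advertised and
// successfully requested on 'request'.
bool enableZoomSettingsOverride(const ACameraMetadata* chars, ACaptureRequest* request) {
    ACameraMetadata_const_entry entry = {};
    if (ACameraMetadata_getConstEntry(
                chars, ACAMERA_CONTROL_AVAILABLE_SETTINGS_OVERRIDES, &entry) != ACAMERA_OK) {
        return false;  // tag absent: device does not report any overrides
    }
    bool zoomSupported = false;
    for (uint32_t i = 0; i < entry.count; i++) {
        if (entry.data.i32[i] == ACAMERA_CONTROL_SETTINGS_OVERRIDE_ZOOM) {
            zoomSupported = true;
            break;
        }
    }
    if (!zoomSupported) {
        return false;
    }
    const int32_t overrideMode = ACAMERA_CONTROL_SETTINGS_OVERRIDE_ZOOM;
    return ACaptureRequest_setEntry_i32(
                   request, ACAMERA_CONTROL_SETTINGS_OVERRIDE, 1, &overrideMode) == ACAMERA_OK;
}
```

The color space profile map added above can be queried the same way with ACameraMetadata_getConstEntry() and ACAMERA_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP; each profile is an int64 triple combining a color space, an image format, and a dynamic range profile, as described in the tag documentation.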
diff --git a/camera/ndk/ndk_vendor/impl/ACameraManager.cpp b/camera/ndk/ndk_vendor/impl/ACameraManager.cpp
index 77c934a..bb4ef56 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraManager.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraManager.cpp
@@ -165,7 +165,10 @@
     Mutex::Autolock _l(mLock);
     if (mCameraService != nullptr) {
         mCameraService->unlinkToDeath(mDeathNotifier);
-        mCameraService->removeListener(mCameraServiceListener);
+        auto stat = mCameraService->removeListener(mCameraServiceListener);
+        if (!stat.isOk()) {
+            ALOGE("Failed to remove listener to camera service %s", stat.description().c_str());
+        }
     }
     mDeathNotifier.clear();
     if (mCbLooper != nullptr) {
@@ -475,6 +478,10 @@
                 ALOGE("%s: Cannot find camera callback fp!", __FUNCTION__);
                 return;
             }
+            if (cb == nullptr) {
+                // Physical camera callback is null
+                return;
+            }
             found = msg->findPointer(kContextKey, &context);
             if (!found) {
                 ALOGE("%s: Cannot find callback context!", __FUNCTION__);
diff --git a/camera/tests/fuzzer/Android.bp b/camera/tests/fuzzer/Android.bp
new file mode 100644
index 0000000..bae8706
--- /dev/null
+++ b/camera/tests/fuzzer/Android.bp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_camera_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_camera_license"],
+}
+
+cc_defaults {
+    name: "camera_defaults",
+    static_libs: [
+        "libcamera_client",
+    ],
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "libutils",
+        "liblog",
+        "libbinder",
+        "libgui",
+        "libcamera_metadata",
+        "libnativewindow",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
+
+cc_fuzz {
+    name: "camera_fuzzer",
+    srcs: [
+        "camera_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_c2CaptureRequest_fuzzer",
+    srcs: [
+        "camera_c2CaptureRequest_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_c2ConcurrentCamera_fuzzer",
+    srcs: [
+        "camera_c2ConcurrentCamera_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_c2SubmitInfo_fuzzer",
+    srcs: [
+        "camera_c2SubmitInfo_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_c2SessionConfiguration_fuzzer",
+    srcs: [
+        "camera_c2SessionConfiguration_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_c2OutputConfiguration_fuzzer",
+    srcs: [
+        "camera_c2OutputConfiguration_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_vendorTagDescriptor_fuzzer",
+    srcs: [
+        "camera_vendorTagDescriptor_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+    include_dirs: [
+        "system/media/camera/tests",
+        "system/media/private/camera/include",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_Parameters_fuzzer",
+    srcs: [
+        "camera_Parameters_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_SessionStats_fuzzer",
+    srcs: [
+        "camera_SessionStats_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "camera_captureResult_fuzzer",
+    srcs: [
+        "camera_captureResult_fuzzer.cpp",
+    ],
+    defaults: [
+        "camera_defaults",
+    ],
+}
diff --git a/camera/tests/fuzzer/README.md b/camera/tests/fuzzer/README.md
new file mode 100644
index 0000000..c07ac04
--- /dev/null
+++ b/camera/tests/fuzzer/README.md
@@ -0,0 +1,74 @@
+# Fuzzers for libcamera_client
+
+## Plugin Design Considerations
+The fuzzer plugins for libcamera_client are designed based on the understanding of the
+source code and try to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzers.
+
+libcamera_client supports the following parameters:
+1. Command (parameter name: `cmd`)
+2. Video Buffer Mode (parameter name: `videoBufferMode`)
+3. Preview Callback Flag (parameter name: `previewCallbackFlag`)
+4. Facing (parameter name: `facing`)
+5. Orientation (parameter name: `orientation`)
+6. Format (parameter name: `format`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `cmd` | 0.`CAMERA_CMD_START_SMOOTH_ZOOM` 1.`CAMERA_CMD_STOP_SMOOTH_ZOOM` 2.`CAMERA_CMD_SET_DISPLAY_ORIENTATION` 3.`CAMERA_CMD_ENABLE_SHUTTER_SOUND` 4.`CAMERA_CMD_PLAY_RECORDING_SOUND` 5.`CAMERA_CMD_START_FACE_DETECTION` 6.`CAMERA_CMD_STOP_FACE_DETECTION` 7.`CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG` 8.`CAMERA_CMD_PING` 9.`CAMERA_CMD_SET_VIDEO_BUFFER_COUNT` 10.`CAMERA_CMD_SET_VIDEO_FORMAT`| Value obtained from FuzzedDataProvider|
+| `videoBufferMode` |0. `ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV` 1.`ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA` 2.`ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE`| Value obtained from FuzzedDataProvider|
+| `previewCallbackFlag` | 0. `CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK` 1.`CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK` 2.`CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK` 3.`CAMERA_FRAME_CALLBACK_FLAG_NOOP` 4.`CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER` 5.`CAMERA_FRAME_CALLBACK_FLAG_CAMERA` 6.`CAMERA_FRAME_CALLBACK_FLAG_BARCODE_SCANNER`| Value obtained from FuzzedDataProvider|
+| `facing` | 0.`android::hardware::CAMERA_FACING_BACK` 1.`android::hardware::CAMERA_FACING_FRONT`| Value obtained from FuzzedDataProvider|
+| `orientation` | 0.`0` 1.`90` 2.`180` 3.`270`| Value obtained from FuzzedDataProvider|
+| `format` | 0.`CameraParameters::PIXEL_FORMAT_YUV422SP` 1.`CameraParameters::PIXEL_FORMAT_YUV420SP` 2.`CameraParameters::PIXEL_FORMAT_YUV422I` 3.`CameraParameters::PIXEL_FORMAT_YUV420P` 4.`CameraParameters::PIXEL_FORMAT_RGB565` 5.`CameraParameters::PIXEL_FORMAT_RGBA8888` 6.`CameraParameters::PIXEL_FORMAT_JPEG` 7.`CameraParameters::PIXEL_FORMAT_BAYER_RGGB` 8.`CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE`| Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugins are always deterministic for any given input.
+
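+As a rough illustration of this approach (mirroring the pattern used in camera_Parameters_fuzzer.cpp below), a
+parameter such as the preview format can be driven from the fuzzed input like this:
+
+```cpp
+#include <fuzzer/FuzzedDataProvider.h>
+#include <string>
+
+// Illustrative only: pick either a known-valid format or a free-form string,
+// so both valid and malformed configurations are exercised deterministically.
+std::string pickPreviewFormat(FuzzedDataProvider& fdp) {
+    const std::string kFormats[] = {"yuv420sp", "yuv422i-yuyv", "rgb565", "jpeg"};
+    return fdp.ConsumeBool() ? fdp.PickValueInArray(kFormats)
+                             : fdp.ConsumeRandomLengthString();
+}
+```
+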
+##### Maximize utilization of input data
+The plugins feed the entire input data to the module.
+This ensures that the plugins tolerate any kind of input (empty, huge,
+malformed, etc.) and don't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes steps to build the camera_fuzzer, camera_c2CaptureRequest_fuzzer, camera_c2ConcurrentCamera_fuzzer, camera_c2SubmitInfo_fuzzer, camera_c2SessionConfiguration_fuzzer, camera_c2OutputConfiguration_fuzzer, camera_vendorTagDescriptor_fuzzer, camera_Parameters_fuzzer, camera_SessionStats_fuzzer and camera_captureResult_fuzzer binaries.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) camera_fuzzer
+  $ mm -j$(nproc) camera_c2CaptureRequest_fuzzer
+  $ mm -j$(nproc) camera_c2ConcurrentCamera_fuzzer
+  $ mm -j$(nproc) camera_c2SubmitInfo_fuzzer
+  $ mm -j$(nproc) camera_c2SessionConfiguration_fuzzer
+  $ mm -j$(nproc) camera_c2OutputConfiguration_fuzzer
+  $ mm -j$(nproc) camera_vendorTagDescriptor_fuzzer
+  $ mm -j$(nproc) camera_Parameters_fuzzer
+  $ mm -j$(nproc) camera_SessionStats_fuzzer
+  $ mm -j$(nproc) camera_captureResult_fuzzer
+```
+#### Steps to run
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_fuzzer/camera_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_c2CaptureRequest_fuzzer/camera_c2CaptureRequest_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_c2ConcurrentCamera_fuzzer/camera_c2ConcurrentCamera_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_c2SubmitInfo_fuzzer/camera_c2SubmitInfo_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_c2SessionConfiguration_fuzzer/camera_c2SessionConfiguration_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_c2OutputConfiguration_fuzzer/camera_c2OutputConfiguration_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_vendorTagDescriptor_fuzzer/camera_vendorTagDescriptor_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_Parameters_fuzzer/camera_Parameters_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_SessionStats_fuzzer/camera_SessionStats_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/camera_captureResult_fuzzer/camera_captureResult_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/camera/tests/fuzzer/camera2common.h b/camera/tests/fuzzer/camera2common.h
new file mode 100644
index 0000000..14a1b1b
--- /dev/null
+++ b/camera/tests/fuzzer/camera2common.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CAMERA2COMMON_H
+#define CAMERA2COMMON_H
+
+#include <binder/Parcel.h>
+
+using namespace android;
+
+template <class type>
+void invokeReadWriteNullParcel(type* obj) {
+    Parcel* parcelNull = nullptr;
+    obj->writeToParcel(parcelNull);
+    obj->readFromParcel(parcelNull);
+}
+
+template <class type>
+void invokeReadWriteNullParcelsp(sp<type> obj) {
+    Parcel* parcelNull = nullptr;
+    obj->writeToParcel(parcelNull);
+    obj->readFromParcel(parcelNull);
+}
+
+template <class type>
+void invokeReadWriteParcel(type* obj) {
+    Parcel* parcel = new Parcel();
+    obj->writeToParcel(parcel);
+    parcel->setDataPosition(0);
+    obj->readFromParcel(parcel);
+    delete parcel;
+}
+
+template <class type>
+void invokeReadWriteParcelsp(sp<type> obj) {
+    Parcel* parcel = new Parcel();
+    obj->writeToParcel(parcel);
+    parcel->setDataPosition(0);
+    obj->readFromParcel(parcel);
+    delete parcel;
+}
+
+#endif  // CAMERA2COMMON_H
diff --git a/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp b/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
new file mode 100644
index 0000000..45b3526
--- /dev/null
+++ b/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <CameraParameters.h>
+#include <CameraParameters2.h>
+#include <fcntl.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <utils/String16.h>
+
+using namespace std;
+using namespace android;
+
+string kValidFormats[] = {
+        CameraParameters::PIXEL_FORMAT_YUV422SP,      CameraParameters::PIXEL_FORMAT_YUV420SP,
+        CameraParameters::PIXEL_FORMAT_YUV422I,       CameraParameters::PIXEL_FORMAT_YUV420P,
+        CameraParameters::PIXEL_FORMAT_RGB565,        CameraParameters::PIXEL_FORMAT_RGBA8888,
+        CameraParameters::PIXEL_FORMAT_JPEG,          CameraParameters::PIXEL_FORMAT_BAYER_RGGB,
+        CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE};
+
+class CameraParametersFuzzer {
+  public:
+    void process(const uint8_t* data, size_t size);
+    ~CameraParametersFuzzer() {
+        delete mCameraParameters;
+        delete mCameraParameters2;
+    }
+
+  private:
+    void invokeCameraParameters();
+    template <class type>
+    void initCameraParameters(type** obj);
+    template <class type>
+    void cameraParametersCommon(type* obj);
+    CameraParameters* mCameraParameters = nullptr;
+    CameraParameters2* mCameraParameters2 = nullptr;
+    FuzzedDataProvider* mFDP = nullptr;
+};
+
+template <class type>
+void CameraParametersFuzzer::initCameraParameters(type** obj) {
+    if (mFDP->ConsumeBool()) {
+        *obj = new type();
+    } else {
+        string params;
+        if (mFDP->ConsumeBool()) {
+            int32_t width = mFDP->ConsumeIntegral<int32_t>();
+            int32_t height = mFDP->ConsumeIntegral<int32_t>();
+            int32_t minFps = mFDP->ConsumeIntegral<int32_t>();
+            int32_t maxFps = mFDP->ConsumeIntegral<int32_t>();
+            params = CameraParameters::KEY_SUPPORTED_VIDEO_SIZES;
+            params += '=' + to_string(width) + 'x' + to_string(height) + ';';
+            if (mFDP->ConsumeBool()) {
+                params += CameraParameters::KEY_PREVIEW_FPS_RANGE;
+                params += '=' + to_string(minFps) + ',' + to_string(maxFps) + ';';
+            }
+            if (mFDP->ConsumeBool()) {
+                params += CameraParameters::KEY_SUPPORTED_PICTURE_SIZES;
+                params += '=' + to_string(width) + 'x' + to_string(height) + ';';
+            }
+            if (mFDP->ConsumeBool()) {
+                params += CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS;
+                params += '=' + mFDP->PickValueInArray(kValidFormats) + ';';
+            }
+        } else {
+            params = mFDP->ConsumeRandomLengthString();
+        }
+        *obj = new type(String8(params.c_str()));
+    }
+}
+
+template <class type>
+void CameraParametersFuzzer::cameraParametersCommon(type* obj) {
+    Vector<Size> supportedPreviewSizes;
+    obj->getSupportedPreviewSizes(supportedPreviewSizes);
+    int32_t previewWidth = mFDP->ConsumeIntegral<int32_t>();
+    int32_t previewHeight = mFDP->ConsumeIntegral<int32_t>();
+    obj->setPreviewSize(previewWidth, previewHeight);
+    obj->getPreviewSize(&previewWidth, &previewHeight);
+
+    Vector<Size> supportedVideoSizes;
+    obj->getSupportedVideoSizes(supportedVideoSizes);
+    if (supportedVideoSizes.size() != 0) {
+        int32_t videoWidth, videoHeight, preferredVideoWidth, preferredVideoHeight;
+        if (mFDP->ConsumeBool()) {
+            int32_t idx = mFDP->ConsumeIntegralInRange<int32_t>(0, supportedVideoSizes.size() - 1);
+            obj->setVideoSize(supportedVideoSizes[idx].width, supportedVideoSizes[idx].height);
+        } else {
+            videoWidth = mFDP->ConsumeIntegral<int32_t>();
+            videoHeight = mFDP->ConsumeIntegral<int32_t>();
+            obj->setVideoSize(videoWidth, videoHeight);
+        }
+        obj->getVideoSize(&videoWidth, &videoHeight);
+        obj->getPreferredPreviewSizeForVideo(&preferredVideoWidth, &preferredVideoHeight);
+    }
+
+    int32_t fps = mFDP->ConsumeIntegral<int32_t>();
+    obj->setPreviewFrameRate(fps);
+    obj->getPreviewFrameRate();
+    string previewFormat = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
+                                               : mFDP->ConsumeRandomLengthString();
+    obj->setPreviewFormat(previewFormat.c_str());
+
+    int32_t pictureWidth = mFDP->ConsumeIntegral<int32_t>();
+    int32_t pictureHeight = mFDP->ConsumeIntegral<int32_t>();
+    Vector<Size> supportedPictureSizes;
+    obj->setPictureSize(pictureWidth, pictureHeight);
+    obj->getPictureSize(&pictureWidth, &pictureHeight);
+    obj->getSupportedPictureSizes(supportedPictureSizes);
+    string pictureFormat = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
+                                               : mFDP->ConsumeRandomLengthString();
+    obj->setPictureFormat(pictureFormat.c_str());
+    obj->getPictureFormat();
+
+    if (mFDP->ConsumeBool()) {
+        obj->dump();
+    } else {
+        int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+        Vector<String16> args = {};
+        obj->dump(fd, args);
+        close(fd);
+    }
+}
+
+void CameraParametersFuzzer::invokeCameraParameters() {
+    initCameraParameters<CameraParameters>(&mCameraParameters);
+    cameraParametersCommon<CameraParameters>(mCameraParameters);
+    initCameraParameters<CameraParameters2>(&mCameraParameters2);
+    cameraParametersCommon<CameraParameters2>(mCameraParameters2);
+
+    int32_t minFPS, maxFPS;
+    mCameraParameters->getPreviewFpsRange(&minFPS, &maxFPS);
+    string format = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
+                                        : mFDP->ConsumeRandomLengthString();
+    mCameraParameters->previewFormatToEnum(format.c_str());
+    mCameraParameters->isEmpty();
+    Vector<int32_t> formats;
+    mCameraParameters->getSupportedPreviewFormats(formats);
+}
+
+void CameraParametersFuzzer::process(const uint8_t* data, size_t size) {
+    mFDP = new FuzzedDataProvider(data, size);
+    invokeCameraParameters();
+    delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    CameraParametersFuzzer cameraParametersFuzzer;
+    cameraParametersFuzzer.process(data, size);
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_SessionStats_fuzzer.cpp b/camera/tests/fuzzer/camera_SessionStats_fuzzer.cpp
new file mode 100644
index 0000000..5866aaf
--- /dev/null
+++ b/camera/tests/fuzzer/camera_SessionStats_fuzzer.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <CameraSessionStats.h>
+#include <binder/Parcel.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+using namespace android::hardware;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    CameraStreamStats* cameraStreamStats = nullptr;
+    Parcel parcelCamStreamStats;
+
+    if (fdp.ConsumeBool()) {
+        cameraStreamStats = new CameraStreamStats();
+    } else {
+        int32_t width = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(width);
+        }
+        int32_t height = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(height);
+        }
+        int32_t format = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(format);
+        }
+        float maxPreviewFps = fdp.ConsumeFloatingPoint<float>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeFloat(maxPreviewFps);
+        }
+        int32_t dataSpace = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(dataSpace);
+        }
+        int64_t usage = fdp.ConsumeIntegral<int64_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt64(usage);
+        }
+        int64_t requestCount = fdp.ConsumeIntegral<int64_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt64(requestCount);
+        }
+        int64_t errorCount = fdp.ConsumeIntegral<int64_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt64(errorCount);
+        }
+        int32_t maxHalBuffers = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(maxHalBuffers);
+        }
+        int32_t maxAppBuffers = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(maxAppBuffers);
+        }
+        int32_t dynamicRangeProfile = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(dynamicRangeProfile);
+        }
+        int32_t streamUseCase = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(streamUseCase);
+        }
+        int32_t colorSpace = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamStreamStats.writeInt32(colorSpace);
+        }
+
+        cameraStreamStats = new CameraStreamStats(width, height, format, maxPreviewFps, dataSpace,
+                                                  usage, maxHalBuffers, maxAppBuffers,
+                                                  dynamicRangeProfile, streamUseCase, colorSpace);
+    }
+
+    parcelCamStreamStats.setDataPosition(0);
+    cameraStreamStats->readFromParcel(&parcelCamStreamStats);
+    invokeReadWriteNullParcel<CameraStreamStats>(cameraStreamStats);
+    invokeReadWriteParcel<CameraStreamStats>(cameraStreamStats);
+
+    CameraSessionStats* cameraSessionStats = nullptr;
+    Parcel parcelCamSessionStats;
+
+    if (fdp.ConsumeBool()) {
+        cameraSessionStats = new CameraSessionStats();
+    } else {
+        string camId = fdp.ConsumeRandomLengthString();
+        String16 cameraId(camId.c_str());
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeString16(cameraId);
+        }
+        int32_t facing = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeInt32(facing);
+        }
+        int32_t newCameraState = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeInt32(newCameraState);
+        }
+        string name = fdp.ConsumeRandomLengthString();
+        String16 clientName(name.c_str());
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeString16(clientName);
+        }
+        int32_t apiLevel = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeInt32(apiLevel);
+        }
+        bool isNdk = fdp.ConsumeBool();
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeBool(isNdk);
+        }
+        int32_t latencyMs = fdp.ConsumeIntegral<int32_t>();
+        if (fdp.ConsumeBool()) {
+            parcelCamSessionStats.writeInt32(latencyMs);
+        }
+
+        cameraSessionStats = new CameraSessionStats(cameraId, facing, newCameraState, clientName,
+                                                    apiLevel, isNdk, latencyMs);
+    }
+
+    if (fdp.ConsumeBool()) {
+        int32_t internalReconfigure = fdp.ConsumeIntegral<int32_t>();
+        parcelCamSessionStats.writeInt32(internalReconfigure);
+    }
+
+    if (fdp.ConsumeBool()) {
+        int64_t requestCount = fdp.ConsumeIntegral<int64_t>();
+        parcelCamSessionStats.writeInt64(requestCount);
+    }
+
+    if (fdp.ConsumeBool()) {
+        int64_t resultErrorCount = fdp.ConsumeIntegral<int64_t>();
+        parcelCamSessionStats.writeInt64(resultErrorCount);
+    }
+
+    if (fdp.ConsumeBool()) {
+        bool deviceError = fdp.ConsumeBool();
+        parcelCamSessionStats.writeBool(deviceError);
+    }
+
+    parcelCamSessionStats.setDataPosition(0);
+    cameraSessionStats->readFromParcel(&parcelCamSessionStats);
+    invokeReadWriteNullParcel<CameraSessionStats>(cameraSessionStats);
+    invokeReadWriteParcel<CameraSessionStats>(cameraSessionStats);
+
+    delete cameraStreamStats;
+    delete cameraSessionStats;
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp b/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
new file mode 100644
index 0000000..06215a5
--- /dev/null
+++ b/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <CameraMetadata.h>
+#include <camera2/CaptureRequest.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <gui/view/Surface.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+
+constexpr int32_t kNonZeroRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+constexpr int32_t kSizeMin = 1;
+constexpr int32_t kSizeMax = 1000;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    sp<CaptureRequest> captureRequest = new CaptureRequest();
+    Parcel parcelCamCaptureReq;
+
+    size_t physicalCameraSettingsSize =
+            fdp.ConsumeIntegralInRange<size_t>(kNonZeroRangeMin, kRangeMax);
+    if (fdp.ConsumeBool()) {
+        parcelCamCaptureReq.writeInt32(physicalCameraSettingsSize);
+    }
+
+    for (size_t idx = 0; idx < physicalCameraSettingsSize; ++idx) {
+        string id = fdp.ConsumeRandomLengthString();
+        if (fdp.ConsumeBool()) {
+            parcelCamCaptureReq.writeString16(String16(id.c_str()));
+        }
+        CameraMetadata cameraMetadata;
+        if (fdp.ConsumeBool()) {
+            cameraMetadata = CameraMetadata();
+        } else {
+            size_t entryCapacity = fdp.ConsumeIntegralInRange<size_t>(kNonZeroRangeMin, kRangeMax);
+            size_t dataCapacity = fdp.ConsumeIntegralInRange<size_t>(kNonZeroRangeMin, kRangeMax);
+            cameraMetadata = CameraMetadata(entryCapacity, dataCapacity);
+        }
+        captureRequest->mPhysicalCameraSettings.push_back({id, cameraMetadata});
+        if (fdp.ConsumeBool()) {
+            cameraMetadata.writeToParcel(&parcelCamCaptureReq);
+        }
+    }
+
+    captureRequest->mIsReprocess = fdp.ConsumeBool();
+    if (fdp.ConsumeBool()) {
+        parcelCamCaptureReq.writeInt32(captureRequest->mIsReprocess);
+    }
+
+    captureRequest->mSurfaceConverted = fdp.ConsumeBool();
+    if (fdp.ConsumeBool() && captureRequest->mSurfaceConverted) {
+        // 0-sized array
+        parcelCamCaptureReq.writeInt32(0);
+    }
+
+    if (!captureRequest->mSurfaceConverted) {
+        size_t surfaceListSize = fdp.ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+        if (fdp.ConsumeBool()) {
+            parcelCamCaptureReq.writeInt32(surfaceListSize);
+        }
+        for (size_t idx = 0; idx < surfaceListSize; ++idx) {
+            sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+            sp<SurfaceControl> surfaceControl = composerClient->createSurface(
+                    static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
+                    fdp.ConsumeIntegral<uint32_t>() /* width */,
+                    fdp.ConsumeIntegral<uint32_t>() /* height */,
+                    fdp.ConsumeIntegral<int32_t>() /* format */,
+                    fdp.ConsumeIntegral<int32_t>() /* flags */);
+            if (surfaceControl) {
+                sp<Surface> surface = surfaceControl->getSurface();
+                captureRequest->mSurfaceList.push_back(surface);
+                if (fdp.ConsumeBool()) {
+                    view::Surface surfaceShim;
+                    surfaceShim.name = String16((fdp.ConsumeRandomLengthString()).c_str());
+                    surfaceShim.graphicBufferProducer = surface->getIGraphicBufferProducer();
+                    surfaceShim.writeToParcel(&parcelCamCaptureReq);
+                }
+                surface.clear();
+            }
+            composerClient.clear();
+            surfaceControl.clear();
+        }
+    }
+
+    size_t indexListSize = fdp.ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+    if (fdp.ConsumeBool()) {
+        parcelCamCaptureReq.writeInt32(indexListSize);
+    }
+
+    for (size_t idx = 0; idx < indexListSize; ++idx) {
+        int32_t streamIdx = fdp.ConsumeIntegral<int32_t>();
+        int32_t surfaceIdx = fdp.ConsumeIntegral<int32_t>();
+        captureRequest->mStreamIdxList.push_back(streamIdx);
+        captureRequest->mSurfaceIdxList.push_back(surfaceIdx);
+        if (fdp.ConsumeBool()) {
+            parcelCamCaptureReq.writeInt32(streamIdx);
+        }
+        if (fdp.ConsumeBool()) {
+            parcelCamCaptureReq.writeInt32(surfaceIdx);
+        }
+    }
+
+    invokeReadWriteParcelsp<CaptureRequest>(captureRequest);
+    invokeReadWriteNullParcelsp<CaptureRequest>(captureRequest);
+    parcelCamCaptureReq.setDataPosition(0);
+    captureRequest->readFromParcel(&parcelCamCaptureReq);
+    captureRequest.clear();
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_c2ConcurrentCamera_fuzzer.cpp b/camera/tests/fuzzer/camera_c2ConcurrentCamera_fuzzer.cpp
new file mode 100644
index 0000000..12b5bc3
--- /dev/null
+++ b/camera/tests/fuzzer/camera_c2ConcurrentCamera_fuzzer.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <camera2/ConcurrentCamera.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+using namespace android::hardware::camera2::utils;
+
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    ConcurrentCameraIdCombination camIdCombination;
+
+    if (fdp.ConsumeBool()) {
+        size_t concurrentCameraIdSize = fdp.ConsumeIntegralInRange<size_t>(kRangeMin, kRangeMax);
+        for (size_t idx = 0; idx < concurrentCameraIdSize; ++idx) {
+            string concurrentCameraId = fdp.ConsumeRandomLengthString();
+            camIdCombination.mConcurrentCameraIds.push_back(concurrentCameraId);
+        }
+    }
+
+    invokeReadWriteNullParcel<ConcurrentCameraIdCombination>(&camIdCombination);
+    invokeReadWriteParcel<ConcurrentCameraIdCombination>(&camIdCombination);
+
+    CameraIdAndSessionConfiguration camIdAndSessionConfig;
+
+    if (fdp.ConsumeBool()) {
+        camIdAndSessionConfig.mCameraId = fdp.ConsumeRandomLengthString();
+        if (fdp.ConsumeBool()) {
+            camIdAndSessionConfig.mSessionConfiguration = SessionConfiguration();
+        } else {
+            int32_t inputWidth = fdp.ConsumeIntegral<int32_t>();
+            int32_t inputHeight = fdp.ConsumeIntegral<int32_t>();
+            int32_t inputFormat = fdp.ConsumeIntegral<int32_t>();
+            int32_t operatingMode = fdp.ConsumeIntegral<int32_t>();
+            camIdAndSessionConfig.mSessionConfiguration =
+                    SessionConfiguration(inputWidth, inputHeight, inputFormat, operatingMode);
+        }
+    }
+
+    invokeReadWriteNullParcel<CameraIdAndSessionConfiguration>(&camIdAndSessionConfig);
+    invokeReadWriteParcel<CameraIdAndSessionConfiguration>(&camIdAndSessionConfig);
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp b/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
new file mode 100644
index 0000000..51ac4e8
--- /dev/null
+++ b/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <camera2/OutputConfiguration.h>
+#include <camera2/SessionConfiguration.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+using namespace android::hardware::camera2::params;
+
+constexpr int32_t kSizeMin = 0;
+constexpr int32_t kSizeMax = 1000;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    OutputConfiguration* outputConfiguration = nullptr;
+
+    if (fdp.ConsumeBool()) {
+        outputConfiguration = new OutputConfiguration();
+    } else {
+        int32_t rotation = fdp.ConsumeIntegral<int32_t>();
+        string phyCameraId = fdp.ConsumeRandomLengthString();
+        String16 physicalCameraId(phyCameraId.c_str());
+        int32_t surfaceSetID = fdp.ConsumeIntegral<int32_t>();
+        bool isShared = fdp.ConsumeBool();
+
+        if (fdp.ConsumeBool()) {
+            sp<IGraphicBufferProducer> iGBP = nullptr;
+            sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+            sp<SurfaceControl> surfaceControl = composerClient->createSurface(
+                    static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
+                    fdp.ConsumeIntegral<uint32_t>() /* width */,
+                    fdp.ConsumeIntegral<uint32_t>() /* height */,
+                    fdp.ConsumeIntegral<int32_t>() /* format */,
+                    fdp.ConsumeIntegral<int32_t>() /* flags */);
+            if (surfaceControl) {
+                sp<Surface> surface = surfaceControl->getSurface();
+                iGBP = surface->getIGraphicBufferProducer();
+            }
+            outputConfiguration = new OutputConfiguration(iGBP, rotation, physicalCameraId,
+                                                          surfaceSetID, isShared);
+            iGBP.clear();
+            composerClient.clear();
+            surfaceControl.clear();
+        } else {
+            size_t iGBPSize = fdp.ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+            vector<sp<IGraphicBufferProducer>> iGBPs;
+            for (size_t idx = 0; idx < iGBPSize; ++idx) {
+                sp<IGraphicBufferProducer> iGBP = nullptr;
+                sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+                sp<SurfaceControl> surfaceControl = composerClient->createSurface(
+                        static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
+                        fdp.ConsumeIntegral<uint32_t>() /* width */,
+                        fdp.ConsumeIntegral<uint32_t>() /* height */,
+                        fdp.ConsumeIntegral<int32_t>() /* format */,
+                        fdp.ConsumeIntegral<int32_t>() /* flags */);
+                if (surfaceControl) {
+                    sp<Surface> surface = surfaceControl->getSurface();
+                    iGBP = surface->getIGraphicBufferProducer();
+                    iGBPs.push_back(iGBP);
+                }
+                iGBP.clear();
+                composerClient.clear();
+                surfaceControl.clear();
+            }
+            outputConfiguration = new OutputConfiguration(iGBPs, rotation, physicalCameraId,
+                                                          surfaceSetID, isShared);
+        }
+    }
+
+    outputConfiguration->getRotation();
+    outputConfiguration->getSurfaceSetID();
+    outputConfiguration->getSurfaceType();
+    outputConfiguration->getWidth();
+    outputConfiguration->getHeight();
+    outputConfiguration->isDeferred();
+    outputConfiguration->isShared();
+    outputConfiguration->getPhysicalCameraId();
+
+    OutputConfiguration outputConfiguration2;
+    outputConfiguration->gbpsEqual(outputConfiguration2);
+    outputConfiguration->sensorPixelModesUsedEqual(outputConfiguration2);
+    outputConfiguration->gbpsLessThan(outputConfiguration2);
+    outputConfiguration->sensorPixelModesUsedLessThan(outputConfiguration2);
+    outputConfiguration->getGraphicBufferProducers();
+    sp<IGraphicBufferProducer> gbp;
+    outputConfiguration->addGraphicProducer(gbp);
+    invokeReadWriteNullParcel<OutputConfiguration>(outputConfiguration);
+    invokeReadWriteParcel<OutputConfiguration>(outputConfiguration);
+    delete outputConfiguration;
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_c2SessionConfiguration_fuzzer.cpp b/camera/tests/fuzzer/camera_c2SessionConfiguration_fuzzer.cpp
new file mode 100644
index 0000000..b2de95d
--- /dev/null
+++ b/camera/tests/fuzzer/camera_c2SessionConfiguration_fuzzer.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <camera2/OutputConfiguration.h>
+#include <camera2/SessionConfiguration.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+using namespace android::hardware::camera2::params;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    SessionConfiguration* sessionConfiguration = nullptr;
+
+    if (fdp.ConsumeBool()) {
+        sessionConfiguration = new SessionConfiguration();
+    } else {
+        int32_t inputWidth = fdp.ConsumeIntegral<int32_t>();
+        int32_t inputHeight = fdp.ConsumeIntegral<int32_t>();
+        int32_t inputFormat = fdp.ConsumeIntegral<int32_t>();
+        int32_t operatingMode = fdp.ConsumeIntegral<int32_t>();
+        sessionConfiguration =
+                new SessionConfiguration(inputWidth, inputHeight, inputFormat, operatingMode);
+    }
+
+    sessionConfiguration->getInputWidth();
+    sessionConfiguration->getInputHeight();
+    sessionConfiguration->getInputFormat();
+    sessionConfiguration->getOperatingMode();
+
+    OutputConfiguration* outputConfiguration = nullptr;
+
+    if (fdp.ConsumeBool()) {
+        outputConfiguration = new OutputConfiguration();
+        sessionConfiguration->addOutputConfiguration(*outputConfiguration);
+    } else {
+        sp<IGraphicBufferProducer> iGBP = nullptr;
+        sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+        sp<SurfaceControl> surfaceControl = composerClient->createSurface(
+                static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()),
+                fdp.ConsumeIntegral<uint32_t>(), fdp.ConsumeIntegral<uint32_t>(),
+                fdp.ConsumeIntegral<int32_t>(), fdp.ConsumeIntegral<int32_t>());
+        if (surfaceControl) {
+            sp<Surface> surface = surfaceControl->getSurface();
+            iGBP = surface->getIGraphicBufferProducer();
+            surface.clear();
+        }
+        int32_t rotation = fdp.ConsumeIntegral<int32_t>();
+        string phyCameraId = fdp.ConsumeRandomLengthString();
+        String16 physicalCameraId(phyCameraId.c_str());
+        int32_t surfaceSetID = fdp.ConsumeIntegral<int32_t>();
+        bool isShared = fdp.ConsumeBool();
+        outputConfiguration =
+                new OutputConfiguration(iGBP, rotation, physicalCameraId, surfaceSetID, isShared);
+        sessionConfiguration->addOutputConfiguration(*outputConfiguration);
+    }
+
+    sessionConfiguration->getOutputConfigurations();
+    SessionConfiguration sessionConfiguration2;
+    sessionConfiguration->outputsEqual(sessionConfiguration2);
+    sessionConfiguration->outputsLessThan(sessionConfiguration2);
+    sessionConfiguration->inputIsMultiResolution();
+
+    invokeReadWriteNullParcel<SessionConfiguration>(sessionConfiguration);
+    invokeReadWriteParcel<SessionConfiguration>(sessionConfiguration);
+
+    delete sessionConfiguration;
+    delete outputConfiguration;
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp b/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
new file mode 100644
index 0000000..dc40b0f
--- /dev/null
+++ b/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <camera2/SubmitInfo.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+using namespace android::hardware::camera2::utils;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    SubmitInfo submitInfo;
+    submitInfo.mRequestId = fdp.ConsumeIntegral<int32_t>();
+    submitInfo.mLastFrameNumber = fdp.ConsumeIntegral<int64_t>();
+    invokeReadWriteParcel<SubmitInfo>(&submitInfo);
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_captureResult_fuzzer.cpp b/camera/tests/fuzzer/camera_captureResult_fuzzer.cpp
new file mode 100644
index 0000000..03cf9c4
--- /dev/null
+++ b/camera/tests/fuzzer/camera_captureResult_fuzzer.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <CaptureResult.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+using namespace android::hardware::camera2::impl;
+
+constexpr int32_t kSizeMin = 0;
+constexpr int32_t kSizeMax = 1000;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    PhysicalCaptureResultInfo* physicalCaptureResultInfo = nullptr;
+
+    if (fdp.ConsumeBool()) {
+        physicalCaptureResultInfo = new PhysicalCaptureResultInfo();
+    } else {
+        string camId = fdp.ConsumeRandomLengthString();
+        String16 cameraId(camId.c_str());
+        CameraMetadata cameraMetadata = CameraMetadata();
+        physicalCaptureResultInfo = new PhysicalCaptureResultInfo(cameraId, cameraMetadata);
+    }
+
+    invokeReadWriteParcel<PhysicalCaptureResultInfo>(physicalCaptureResultInfo);
+
+    CaptureResult* captureResult = new CaptureResult();
+
+    if (fdp.ConsumeBool()) {
+        captureResult->mMetadata = CameraMetadata();
+    }
+    if (fdp.ConsumeBool()) {
+        captureResult->mResultExtras = CaptureResultExtras();
+        string errCamId = fdp.ConsumeRandomLengthString();
+        String16 errCameraId(errCamId.c_str());
+        captureResult->mResultExtras.errorPhysicalCameraId = errCameraId;
+        captureResult->mResultExtras.isValid();
+        invokeReadWriteNullParcel<CaptureResultExtras>(&(captureResult->mResultExtras));
+    }
+    if (fdp.ConsumeBool()) {
+        size_t physicalMetadatasSize = fdp.ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+        for (size_t idx = 0; idx < physicalMetadatasSize; ++idx) {
+            captureResult->mPhysicalMetadatas.push_back(PhysicalCaptureResultInfo());
+        }
+    }
+
+    invokeReadWriteNullParcel<CaptureResult>(captureResult);
+    invokeReadWriteParcel<CaptureResult>(captureResult);
+    CaptureResult captureResult2(*captureResult);
+    CaptureResult captureResult3(move(captureResult2));
+
+    delete captureResult;
+    delete physicalCaptureResultInfo;
+    return 0;
+}
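Note on the helpers used throughout these fuzzers: camera2common.h (not included in this excerpt) supplies invokeReadWriteParcel, invokeReadWriteNullParcel and their sp<> variants. Judging from the explicit write / setDataPosition(0) / read sequence the CaptureRequest fuzzer performs by hand above, a minimal round-trip helper would presumably look like the following sketch; the "Sketch"-suffixed names are hypothetical and are not the real header contents.

#include <binder/Parcel.h>

// Sketch only: serialize an object into a Parcel, rewind, and read it back,
// mirroring the pattern these fuzzers exercise via camera2common.h.
template <typename T>
void invokeReadWriteParcelSketch(T* obj) {
    android::Parcel parcel;
    obj->writeToParcel(&parcel);   // serialize
    parcel.setDataPosition(0);     // rewind so the read starts at the beginning
    obj->readFromParcel(&parcel);  // deserialize into the same object
}

// Sketch only: exercise the defensive null-Parcel paths of the same interfaces.
template <typename T>
void invokeReadWriteNullParcelSketch(T* obj) {
    obj->writeToParcel(nullptr);
    obj->readFromParcel(nullptr);
}
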
diff --git a/camera/tests/fuzzer/camera_fuzzer.cpp b/camera/tests/fuzzer/camera_fuzzer.cpp
new file mode 100644
index 0000000..f45500e
--- /dev/null
+++ b/camera/tests/fuzzer/camera_fuzzer.cpp
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <Camera.h>
+#include <CameraBase.h>
+#include <CameraMetadata.h>
+#include <CameraParameters.h>
+#include <CameraUtils.h>
+#include <VendorTagDescriptor.h>
+#include <binder/IMemory.h>
+#include <binder/MemoryDealer.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <utils/Log.h>
+#include "camera2common.h"
+#include <android/hardware/ICameraService.h>
+
+using namespace std;
+using namespace android;
+using namespace android::hardware;
+
+constexpr int32_t kFrameRateMin = 1;
+constexpr int32_t kFrameRateMax = 120;
+constexpr int32_t kCamIdMin = 0;
+constexpr int32_t kCamIdMax = 1;
+constexpr int32_t kNumMin = 0;
+constexpr int32_t kNumMax = 1024;
+constexpr int32_t kMemoryDealerSize = 1000;
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+constexpr int32_t kSizeMin = 0;
+constexpr int32_t kSizeMax = 1000;
+
+constexpr int32_t kValidCMD[] = {CAMERA_CMD_START_SMOOTH_ZOOM,
+                                 CAMERA_CMD_STOP_SMOOTH_ZOOM,
+                                 CAMERA_CMD_SET_DISPLAY_ORIENTATION,
+                                 CAMERA_CMD_ENABLE_SHUTTER_SOUND,
+                                 CAMERA_CMD_PLAY_RECORDING_SOUND,
+                                 CAMERA_CMD_START_FACE_DETECTION,
+                                 CAMERA_CMD_STOP_FACE_DETECTION,
+                                 CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG,
+                                 CAMERA_CMD_PING,
+                                 CAMERA_CMD_SET_VIDEO_BUFFER_COUNT,
+                                 CAMERA_CMD_SET_VIDEO_FORMAT};
+
+constexpr int32_t kValidVideoBufferMode[] = {ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV,
+                                             ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA,
+                                             ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE};
+
+constexpr int32_t kValidPreviewCallbackFlag[] = {
+        CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK,    CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK,
+        CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK,  CAMERA_FRAME_CALLBACK_FLAG_NOOP,
+        CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER,      CAMERA_FRAME_CALLBACK_FLAG_CAMERA,
+        CAMERA_FRAME_CALLBACK_FLAG_BARCODE_SCANNER};
+
+constexpr int32_t kValidFacing[] = {android::hardware::CAMERA_FACING_BACK,
+                                    android::hardware::CAMERA_FACING_FRONT};
+
+constexpr int32_t kValidOrientation[] = {0, 90, 180, 270};
+
+class TestCameraListener : public CameraListener {
+  public:
+    virtual ~TestCameraListener() = default;
+
+    void notify(int32_t /*msgType*/, int32_t /*ext1*/, int32_t /*ext2*/) override { return; };
+    void postData(int32_t /*msgType*/, const sp<IMemory>& /*dataPtr*/,
+                  camera_frame_metadata_t* /*metadata*/) override {
+        return;
+    };
+    void postDataTimestamp(nsecs_t /*timestamp*/, int32_t /*msgType*/,
+                           const sp<IMemory>& /*dataPtr*/) override {
+        return;
+    };
+    void postRecordingFrameHandleTimestamp(nsecs_t /*timestamp*/,
+                                           native_handle_t* /*handle*/) override {
+        return;
+    };
+    void postRecordingFrameHandleTimestampBatch(
+            const std::vector<nsecs_t>& /*timestamps*/,
+            const std::vector<native_handle_t*>& /*handles*/) override {
+        return;
+    };
+};
+
+class CameraFuzzer : public ::android::hardware::BnCameraClient {
+  public:
+    void process(const uint8_t* data, size_t size);
+    ~CameraFuzzer() {
+        delete mCameraMetadata;
+        mComposerClient.clear();
+        mSurfaceControl.clear();
+        mSurface.clear();
+        mCamera.clear();
+        mMemoryDealer.clear();
+        mIMem.clear();
+        mCameraListener.clear();
+        mCameraService.clear();
+    }
+
+  private:
+    bool initCamera();
+    void initCameraMetadata();
+    void invokeCamera();
+    void invokeCameraUtils();
+    void invokeCameraBase();
+    void invokeCameraMetadata();
+    void invokeSetParameters();
+    sp<Camera> mCamera = nullptr;
+    CameraMetadata* mCameraMetadata = nullptr;
+    sp<SurfaceComposerClient> mComposerClient = nullptr;
+    sp<SurfaceControl> mSurfaceControl = nullptr;
+    sp<Surface> mSurface = nullptr;
+    sp<MemoryDealer> mMemoryDealer = nullptr;
+    sp<IMemory> mIMem = nullptr;
+    sp<TestCameraListener> mCameraListener = nullptr;
+    sp<ICameraService> mCameraService = nullptr;
+    sp<ICamera> cameraDevice = nullptr;
+    FuzzedDataProvider* mFDP = nullptr;
+
+    // CameraClient interface
+    void notifyCallback(int32_t, int32_t, int32_t) override { return; };
+    void dataCallback(int32_t, const sp<IMemory>&, camera_frame_metadata_t*) override { return; };
+    void dataCallbackTimestamp(nsecs_t, int32_t, const sp<IMemory>&) override { return; };
+    void recordingFrameHandleCallbackTimestamp(nsecs_t, native_handle_t*) override { return; };
+    void recordingFrameHandleCallbackTimestampBatch(const std::vector<nsecs_t>&,
+                                                    const std::vector<native_handle_t*>&) override {
+        return;
+    };
+};
+
+bool CameraFuzzer::initCamera() {
+    ProcessState::self()->startThreadPool();
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.camera"));
+    mCameraService = interface_cast<ICameraService>(binder);
+    mCameraService->connect(this, mFDP->ConsumeIntegral<int32_t>() /* cameraId */,
+                            String16("CAMERAFUZZ"), hardware::ICameraService::USE_CALLING_UID,
+                            hardware::ICameraService::USE_CALLING_PID,
+                            /*targetSdkVersion*/ __ANDROID_API_FUTURE__, &cameraDevice);
+    mCamera = Camera::create(cameraDevice);
+    if (!mCamera) {
+        return false;
+    }
+    return true;
+}
+
+void CameraFuzzer::invokeSetParameters() {
+    String8 s = mCamera->getParameters();
+    CameraParameters params(s);
+    int32_t width = mFDP->ConsumeIntegral<int32_t>();
+    int32_t height = mFDP->ConsumeIntegral<int32_t>();
+    params.setVideoSize(width, height);
+    int32_t frameRate = mFDP->ConsumeIntegralInRange<int32_t>(kFrameRateMin, kFrameRateMax);
+    params.setPreviewFrameRate(frameRate);
+    mCamera->setParameters(params.flatten());
+}
+
+void CameraFuzzer::invokeCamera() {
+    if (!initCamera()) {
+        return;
+    }
+
+    int32_t cameraId = mFDP->ConsumeIntegralInRange<int32_t>(kCamIdMin, kCamIdMax);
+    Camera::getNumberOfCameras();
+    CameraInfo cameraInfo;
+    cameraInfo.facing = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFacing)
+                                            : mFDP->ConsumeIntegral<int>();
+    cameraInfo.orientation = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidOrientation)
+                                                 : mFDP->ConsumeIntegral<int>();
+    Camera::getCameraInfo(cameraId, &cameraInfo);
+    mCamera->reconnect();
+
+    mComposerClient = new SurfaceComposerClient;
+    mSurfaceControl = mComposerClient->createSurface(
+            static_cast<String8>(mFDP->ConsumeRandomLengthString().c_str()) /* name */,
+            mFDP->ConsumeIntegral<uint32_t>() /* width */,
+            mFDP->ConsumeIntegral<uint32_t>() /* height */,
+            mFDP->ConsumeIntegral<int32_t>() /* format */,
+            mFDP->ConsumeIntegral<int32_t>() /* flags */);
+    if (mSurfaceControl) {
+        mSurface = mSurfaceControl->getSurface();
+        mCamera->setPreviewTarget(mSurface->getIGraphicBufferProducer());
+        mCamera->startPreview();
+        mCamera->stopPreview();
+        mCamera->previewEnabled();
+        mCamera->startRecording();
+        mCamera->stopRecording();
+    }
+
+    mCamera->lock();
+    mCamera->unlock();
+    mCamera->autoFocus();
+    mCamera->cancelAutoFocus();
+
+    int32_t msgType = mFDP->ConsumeIntegral<int32_t>();
+    mCamera->takePicture(msgType);
+    invokeSetParameters();
+    int32_t cmd;
+    if (mFDP->ConsumeBool()) {
+        cmd = mFDP->PickValueInArray(kValidCMD);
+    } else {
+        cmd = mFDP->ConsumeIntegral<int32_t>();
+    }
+    int32_t arg1 = mFDP->ConsumeIntegral<int32_t>();
+    int32_t arg2 = mFDP->ConsumeIntegral<int32_t>();
+    mCamera->sendCommand(cmd, arg1, arg2);
+
+    int32_t videoBufferMode = mFDP->PickValueInArray(kValidVideoBufferMode);
+    mCamera->setVideoBufferMode(videoBufferMode);
+    if (mSurfaceControl) {
+        mSurface = mSurfaceControl->getSurface();
+        mCamera->setVideoTarget(mSurface->getIGraphicBufferProducer());
+    }
+    mCameraListener = sp<TestCameraListener>::make();
+    mCamera->setListener(mCameraListener);
+    int32_t previewCallbackFlag;
+    if (mFDP->ConsumeBool()) {
+        previewCallbackFlag = mFDP->PickValueInArray(kValidPreviewCallbackFlag);
+    } else {
+        previewCallbackFlag = mFDP->ConsumeIntegral<int32_t>();
+    }
+    mCamera->setPreviewCallbackFlags(previewCallbackFlag);
+    if (mSurfaceControl) {
+        mSurface = mSurfaceControl->getSurface();
+        mCamera->setPreviewCallbackTarget(mSurface->getIGraphicBufferProducer());
+    }
+
+    mCamera->getRecordingProxy();
+    int32_t mode = mFDP->ConsumeIntegral<int32_t>();
+    mCamera->setAudioRestriction(mode);
+    mCamera->getGlobalAudioRestriction();
+    mCamera->recordingEnabled();
+
+    mMemoryDealer = new MemoryDealer(kMemoryDealerSize);
+    mIMem = mMemoryDealer->allocate(kMemoryDealerSize);
+    mCamera->releaseRecordingFrame(mIMem);
+
+    int32_t numFds = mFDP->ConsumeIntegralInRange<int32_t>(kNumMin, kNumMax);
+    int32_t numInts = mFDP->ConsumeIntegralInRange<int32_t>(kNumMin, kNumMax);
+    native_handle_t* handle = native_handle_create(numFds, numInts);
+    mCamera->releaseRecordingFrameHandle(handle);
+
+    int32_t msgTypeNC = mFDP->ConsumeIntegral<int32_t>();
+    int32_t ext = mFDP->ConsumeIntegral<int32_t>();
+    int32_t ext2 = mFDP->ConsumeIntegral<int32_t>();
+    mCamera->notifyCallback(msgTypeNC, ext, ext2);
+
+    int64_t timestamp = mFDP->ConsumeIntegral<int64_t>();
+    mCamera->dataCallbackTimestamp(timestamp, msgTypeNC, mIMem);
+    mCamera->recordingFrameHandleCallbackTimestamp(timestamp, handle);
+}
+
+void CameraFuzzer::invokeCameraUtils() {
+    CameraMetadata staticMetadata;
+    int32_t orientVal = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidOrientation)
+                                            : mFDP->ConsumeIntegral<int32_t>();
+    uint8_t facingVal = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFacing)
+                                            : mFDP->ConsumeIntegral<uint8_t>();
+    staticMetadata.update(ANDROID_SENSOR_ORIENTATION, &orientVal, 1);
+    staticMetadata.update(ANDROID_LENS_FACING, &facingVal, 1);
+    int32_t transform = 0;
+    CameraUtils::getRotationTransform(
+            staticMetadata, mFDP->ConsumeIntegral<int32_t>() /* mirrorMode */, &transform /*out*/);
+    CameraUtils::isCameraServiceDisabled();
+}
+
+void CameraFuzzer::invokeCameraBase() {
+    CameraInfo cameraInfo;
+    cameraInfo.facing = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFacing)
+                                            : mFDP->ConsumeIntegral<int>();
+    cameraInfo.orientation = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidOrientation)
+                                                 : mFDP->ConsumeIntegral<int>();
+    invokeReadWriteParcel<CameraInfo>(&cameraInfo);
+
+    CameraStatus* cameraStatus = nullptr;
+
+    if (mFDP->ConsumeBool()) {
+        cameraStatus = new CameraStatus();
+    } else {
+        string cid = mFDP->ConsumeRandomLengthString();
+        String8 id(cid.c_str());
+        int32_t status = mFDP->ConsumeIntegral<int32_t>();
+        size_t unavailSubIdsSize = mFDP->ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+        vector<String8> unavailSubIds;
+        for (size_t idx = 0; idx < unavailSubIdsSize; ++idx) {
+            string subId = mFDP->ConsumeRandomLengthString();
+            String8 unavailSubId(subId.c_str());
+            unavailSubIds.push_back(unavailSubId);
+        }
+        string clientPkg = mFDP->ConsumeRandomLengthString();
+        String8 clientPackage(clientPkg.c_str());
+        cameraStatus = new CameraStatus(id, status, unavailSubIds, clientPackage);
+    }
+
+    invokeReadWriteParcel<CameraStatus>(cameraStatus);
+    delete cameraStatus;
+}
+
+void CameraFuzzer::initCameraMetadata() {
+    if (mFDP->ConsumeBool()) {
+        mCameraMetadata = new CameraMetadata();
+    } else {
+        size_t entryCapacity = mFDP->ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+        size_t dataCapacity = mFDP->ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+        mCameraMetadata = new CameraMetadata(entryCapacity, dataCapacity);
+    }
+}
+
+void CameraFuzzer::invokeCameraMetadata() {
+    initCameraMetadata();
+
+    const camera_metadata_t* metadataBuffer = nullptr;
+    if (mFDP->ConsumeBool()) {
+        metadataBuffer = mCameraMetadata->getAndLock();
+    }
+
+    mCameraMetadata->entryCount();
+    mCameraMetadata->isEmpty();
+    mCameraMetadata->bufferSize();
+    mCameraMetadata->sort();
+
+    uint32_t tag = mFDP->ConsumeIntegral<uint32_t>();
+    uint8_t dataUint8 = mFDP->ConsumeIntegral<uint8_t>();
+    int32_t dataInt32 = mFDP->ConsumeIntegral<int32_t>();
+    int64_t dataInt64 = mFDP->ConsumeIntegral<int64_t>();
+    float dataFloat = mFDP->ConsumeFloatingPoint<float>();
+    double dataDouble = mFDP->ConsumeFloatingPoint<double>();
+    camera_metadata_rational dataRational;
+    dataRational.numerator = mFDP->ConsumeIntegral<int32_t>();
+    dataRational.denominator = mFDP->ConsumeIntegral<int32_t>();
+    string dataStr = mFDP->ConsumeRandomLengthString();
+    String8 dataString(dataStr.c_str());
+    size_t data_count = 1;
+    mCameraMetadata->update(tag, &dataUint8, data_count);
+    mCameraMetadata->update(tag, &dataInt32, data_count);
+    mCameraMetadata->update(tag, &dataFloat, data_count);
+    mCameraMetadata->update(tag, &dataInt64, data_count);
+    mCameraMetadata->update(tag, &dataRational, data_count);
+    mCameraMetadata->update(tag, &dataDouble, data_count);
+    mCameraMetadata->update(tag, dataString);
+
+    uint32_t tagExists = mFDP->ConsumeBool() ? tag : mFDP->ConsumeIntegral<uint32_t>();
+    mCameraMetadata->exists(tagExists);
+
+    uint32_t tagFind = mFDP->ConsumeBool() ? tag : mFDP->ConsumeIntegral<uint32_t>();
+    mCameraMetadata->find(tagFind);
+
+    uint32_t tagErase = mFDP->ConsumeBool() ? tag : mFDP->ConsumeIntegral<uint32_t>();
+    mCameraMetadata->erase(tagErase);
+
+    mCameraMetadata->unlock(metadataBuffer);
+    std::vector<int32_t> tagsRemoved;
+    uint64_t vendorId = mFDP->ConsumeIntegral<uint64_t>();
+    mCameraMetadata->removePermissionEntries(vendorId, &tagsRemoved);
+
+    string name = mFDP->ConsumeRandomLengthString();
+    VendorTagDescriptor vTags;
+    uint32_t tagName = mFDP->ConsumeIntegral<uint32_t>();
+    mCameraMetadata->getTagFromName(name.c_str(), &vTags, &tagName);
+
+    invokeReadWriteNullParcel<CameraMetadata>(mCameraMetadata);
+    invokeReadWriteParcel<CameraMetadata>(mCameraMetadata);
+
+    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+    int32_t verbosity = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+    int32_t indentation = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+    mCameraMetadata->dump(fd, verbosity, indentation);
+
+    CameraMetadata metadataCopy(mCameraMetadata->release());
+    CameraMetadata otherCameraMetadata;
+    mCameraMetadata->swap(otherCameraMetadata);
+    close(fd);
+}
+
+void CameraFuzzer::process(const uint8_t* data, size_t size) {
+    mFDP = new FuzzedDataProvider(data, size);
+    invokeCamera();
+    invokeCameraUtils();
+    invokeCameraBase();
+    invokeCameraMetadata();
+    delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    sp<CameraFuzzer> cameraFuzzer = new CameraFuzzer();
+    cameraFuzzer->process(data, size);
+    cameraFuzzer.clear();
+    return 0;
+}
diff --git a/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp b/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
new file mode 100644
index 0000000..e14d9ce
--- /dev/null
+++ b/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <VendorTagDescriptor.h>
+#include <binder/Parcel.h>
+#include <camera_metadata_tests_fake_vendor.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <system/camera_vendor_tags.h>
+
+#include <camera_metadata_hidden.h>
+#include "camera2common.h"
+
+using namespace std;
+using namespace android;
+
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+constexpr int32_t kVendorTagDescriptorId = -1;
+
+extern "C" {
+
+static int zero_get_tag_count(const vendor_tag_ops_t*) {
+    return 0;
+}
+
+static int default_get_tag_count(const vendor_tag_ops_t*) {
+    return VENDOR_TAG_COUNT_ERR;
+}
+
+static void default_get_all_tags(const vendor_tag_ops_t*, uint32_t*) {}
+
+static const char* default_get_section_name(const vendor_tag_ops_t*, uint32_t) {
+    return VENDOR_SECTION_NAME_ERR;
+}
+
+static const char* default_get_tag_name(const vendor_tag_ops_t*, uint32_t) {
+    return VENDOR_TAG_NAME_ERR;
+}
+
+static int default_get_tag_type(const vendor_tag_ops_t*, uint32_t) {
+    return VENDOR_TAG_TYPE_ERR;
+}
+
+} /*extern "C"*/
+
+static void FillWithDefaults(vendor_tag_ops_t* vOps) {
+    vOps->get_tag_count = default_get_tag_count;
+    vOps->get_all_tags = default_get_all_tags;
+    vOps->get_section_name = default_get_section_name;
+    vOps->get_tag_name = default_get_tag_name;
+    vOps->get_tag_type = default_get_tag_type;
+}
+
+class VendorTagDescriptorFuzzer {
+  public:
+    void process(const uint8_t* data, size_t size);
+    ~VendorTagDescriptorFuzzer() {
+        mVendorTagDescriptor.clear();
+        mVendorTagDescriptorCache.clear();
+    }
+
+  private:
+    void initVendorTagDescriptor();
+    void invokeVendorTagDescriptor();
+    void invokeVendorTagDescriptorCache();
+    void invokeVendorTagErrorConditions();
+    sp<VendorTagDescriptor> mVendorTagDescriptor = nullptr;
+    sp<VendorTagDescriptorCache> mVendorTagDescriptorCache = nullptr;
+    FuzzedDataProvider* mFDP = nullptr;
+};
+
+void VendorTagDescriptorFuzzer::initVendorTagDescriptor() {
+    if (mFDP->ConsumeBool()) {
+        mVendorTagDescriptor = new VendorTagDescriptor();
+    } else {
+        const vendor_tag_ops_t* vOps = &fakevendor_ops;
+        VendorTagDescriptor::createDescriptorFromOps(vOps, mVendorTagDescriptor);
+    }
+}
+
+void VendorTagDescriptorFuzzer::invokeVendorTagDescriptor() {
+    initVendorTagDescriptor();
+
+    sp<VendorTagDescriptor> vdesc = new VendorTagDescriptor();
+    vdesc->copyFrom(*mVendorTagDescriptor);
+    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(mVendorTagDescriptor);
+    VendorTagDescriptor::getGlobalVendorTagDescriptor();
+
+    int32_t tagCount = mVendorTagDescriptor->getTagCount();
+    if (tagCount > 0) {
+        uint32_t tagArray[tagCount];
+        mVendorTagDescriptor->getTagArray(tagArray);
+        uint32_t tag;
+        for (int32_t i = 0; i < tagCount; ++i) {
+            tag = tagArray[i];
+            get_local_camera_metadata_section_name_vendor_id(tag, kVendorTagDescriptorId);
+            get_local_camera_metadata_tag_name_vendor_id(tag, kVendorTagDescriptorId);
+            get_local_camera_metadata_tag_type_vendor_id(tag, kVendorTagDescriptorId);
+            mVendorTagDescriptor->getSectionIndex(tag);
+        }
+        mVendorTagDescriptor->getAllSectionNames();
+    }
+
+    String8 name((mFDP->ConsumeRandomLengthString()).c_str());
+    String8 section((mFDP->ConsumeRandomLengthString()).c_str());
+    uint32_t lookupTag;
+    mVendorTagDescriptor->lookupTag(name, section, &lookupTag);
+
+    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+    int32_t verbosity = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+    int32_t indentation = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+    mVendorTagDescriptor->dump(fd, verbosity, indentation);
+
+    invokeReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor);
+    VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+    vdesc.clear();
+    close(fd);
+}
+
+void VendorTagDescriptorFuzzer::invokeVendorTagDescriptorCache() {
+    mVendorTagDescriptorCache = new VendorTagDescriptorCache();
+    uint64_t id = mFDP->ConsumeIntegral<uint64_t>();
+    initVendorTagDescriptor();
+
+    mVendorTagDescriptorCache->addVendorDescriptor(id, mVendorTagDescriptor);
+    VendorTagDescriptorCache::setAsGlobalVendorTagCache(mVendorTagDescriptorCache);
+    VendorTagDescriptorCache::getGlobalVendorTagCache();
+    sp<VendorTagDescriptor> tagDesc;
+    mVendorTagDescriptorCache->getVendorTagDescriptor(id, &tagDesc);
+
+    int32_t tagCount = mVendorTagDescriptorCache->getTagCount(id);
+    if (tagCount > 0) {
+        uint32_t tagArray[tagCount];
+        mVendorTagDescriptorCache->getTagArray(tagArray, id);
+        uint32_t tag;
+        for (int32_t i = 0; i < tagCount; ++i) {
+            tag = tagArray[i];
+            get_local_camera_metadata_section_name_vendor_id(tag, id);
+            get_local_camera_metadata_tag_name_vendor_id(tag, id);
+            get_local_camera_metadata_tag_type_vendor_id(tag, id);
+        }
+    }
+
+    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+    int32_t verbosity = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
+    int32_t indentation = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
+    mVendorTagDescriptorCache->dump(fd, verbosity, indentation);
+
+    invokeReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache);
+    VendorTagDescriptorCache::isVendorCachePresent(id);
+    mVendorTagDescriptorCache->getVendorIdsAndTagDescriptors();
+    mVendorTagDescriptorCache->clearGlobalVendorTagCache();
+    tagDesc.clear();
+    close(fd);
+}
+
+void VendorTagDescriptorFuzzer::invokeVendorTagErrorConditions() {
+    sp<VendorTagDescriptor> vDesc;
+    vendor_tag_ops_t vOps;
+    FillWithDefaults(&vOps);
+    vOps.get_tag_count = zero_get_tag_count;
+
+    if (mFDP->ConsumeBool()) {
+        VendorTagDescriptor::createDescriptorFromOps(/*vOps*/ NULL, vDesc);
+    } else {
+        VendorTagDescriptor::createDescriptorFromOps(&vOps, vDesc);
+        int32_t tagCount = vDesc->getTagCount();
+        uint32_t badTag = mFDP->ConsumeIntegral<uint32_t>();
+        uint32_t badTagArray[tagCount + 1];
+        vDesc->getTagArray(badTagArray);
+        vDesc->getSectionName(badTag);
+        vDesc->getTagName(badTag);
+        vDesc->getTagType(badTag);
+        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+        VendorTagDescriptor::getGlobalVendorTagDescriptor();
+        VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc);
+        invokeReadWriteNullParcelsp<VendorTagDescriptor>(vDesc);
+        vDesc.clear();
+    }
+}
+
+void VendorTagDescriptorFuzzer::process(const uint8_t* data, size_t size) {
+    mFDP = new FuzzedDataProvider(data, size);
+    invokeVendorTagDescriptor();
+    invokeVendorTagDescriptorCache();
+    invokeVendorTagErrorConditions();
+    delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    VendorTagDescriptorFuzzer vendorTagDescriptorFuzzer;
+    vendorTagDescriptorFuzzer.process(data, size);
+    return 0;
+}
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 79b1263..d757cd6 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -419,30 +419,39 @@
 /*
  * Saves metadata needed by Winscope to synchronize the screen recording playback with other traces.
  *
- * The metadata (version 1) is written as a binary array with the following format:
+ * The metadata (version 2) is written as a binary array with the following format:
  * - winscope magic string (#VV1NSC0PET1ME2#, 16B).
- * - the metadata version number (4B).
- * - Realtime-to-monotonic time offset in nanoseconds (8B).
- * - the recorded frames count (8B)
+ * - the metadata version number (4B little endian).
+ * - Realtime-to-elapsed time offset in nanoseconds (8B little endian).
+ * - the recorded frames count (4B little endian)
  * - for each recorded frame:
- *     - System time in monotonic clock timebase in nanoseconds (8B).
+ *     - System time in elapsed clock timebase in nanoseconds (8B little endian).
  *
- * All numbers are Little Endian encoded.
+ *
+ * Metadata version 2 changes:
+ *
+ * Use elapsed time for compatibility with most other UI traces:
+ * - Realtime-to-elapsed time offset (instead of realtime-to-monotonic)
+ * - Frame timestamps in elapsed clock timebase (instead of monotonic)
  */
 static status_t writeWinscopeMetadata(const Vector<std::int64_t>& timestampsMonotonicUs,
         const ssize_t metaTrackIdx, AMediaMuxer *muxer) {
     ALOGV("Writing winscope metadata");
 
     static constexpr auto kWinscopeMagicString = std::string_view {"#VV1NSC0PET1ME2#"};
-    static constexpr std::uint32_t metadataVersion = 1;
-    const std::int64_t realToMonotonicTimeOffsetNs =
-            systemTime(SYSTEM_TIME_REALTIME) - systemTime(SYSTEM_TIME_MONOTONIC);
+    static constexpr std::uint32_t metadataVersion = 2;
+
+    const auto elapsedTimeNs = android::elapsedRealtimeNano();
+    const std::int64_t elapsedToMonotonicTimeOffsetNs =
+            elapsedTimeNs - systemTime(SYSTEM_TIME_MONOTONIC);
+    const std::int64_t realToElapsedTimeOffsetNs =
+            systemTime(SYSTEM_TIME_REALTIME) - elapsedTimeNs;
     const std::uint32_t framesCount = static_cast<std::uint32_t>(timestampsMonotonicUs.size());
 
     sp<ABuffer> buffer = new ABuffer(
         kWinscopeMagicString.size() +
         sizeof(decltype(metadataVersion)) +
-        sizeof(decltype(realToMonotonicTimeOffsetNs)) +
+        sizeof(decltype(realToElapsedTimeOffsetNs)) +
         sizeof(decltype(framesCount)) +
         framesCount * sizeof(std::uint64_t)
     );
@@ -454,14 +463,16 @@
     writeValueLE(metadataVersion, pos);
     pos += sizeof(decltype(metadataVersion));
 
-    writeValueLE(realToMonotonicTimeOffsetNs, pos);
-    pos += sizeof(decltype(realToMonotonicTimeOffsetNs));
+    writeValueLE(realToElapsedTimeOffsetNs, pos);
+    pos += sizeof(decltype(realToElapsedTimeOffsetNs));
 
     writeValueLE(framesCount, pos);
     pos += sizeof(decltype(framesCount));
 
     for (const auto timestampMonotonicUs : timestampsMonotonicUs) {
-        writeValueLE<std::uint64_t>(timestampMonotonicUs * 1000, pos);
+        const auto timestampElapsedNs =
+                elapsedToMonotonicTimeOffsetNs + timestampMonotonicUs * 1000;
+        writeValueLE<std::uint64_t>(timestampElapsedNs, pos);
         pos += sizeof(std::uint64_t);
     }
 
@@ -1159,13 +1170,13 @@
         { NULL,                 0,                  NULL, 0 }
     };
 
-    std::optional<PhysicalDisplayId> displayId = SurfaceComposerClient::getInternalDisplayId();
-    if (!displayId) {
-        fprintf(stderr, "Failed to get ID for internal display\n");
+    const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
+    if (ids.empty()) {
+        fprintf(stderr, "Failed to get ID for any displays\n");
         return 1;
     }
 
-    gPhysicalDisplayId = *displayId;
+    gPhysicalDisplayId = ids.front();
 
     while (true) {
         int optionIndex = 0;
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index beeab54..c43f8ce 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -411,7 +411,10 @@
         composerClient = new SurfaceComposerClient;
         CHECK_EQ(composerClient->initCheck(), (status_t)OK);
 
-        const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+        const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
+        CHECK(!ids.empty());
+
+        const sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
         CHECK(display != nullptr);
 
         ui::DisplayMode mode;
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index 67c68e6..f042d5e 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -749,7 +749,10 @@
         composerClient = new SurfaceComposerClient;
         CHECK_EQ((status_t)OK, composerClient->initCheck());
 
-        const android::sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+        const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
+        CHECK(!ids.empty());
+
+        const android::sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
         CHECK(display != nullptr);
 
         ui::DisplayMode mode;
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index bc7e41e..185491f 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -78,10 +78,14 @@
     int fd = open(outputFileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
 
     if (fd < 0) {
-        ALOGE("couldn't open file");
-        return fd;
+        ALOGE("couldn't open output file %s", outputFileName);
+        return 1;
     }
-    sp<MediaMuxer> muxer = new MediaMuxer(fd, container);
+    sp<MediaMuxer> muxer = MediaMuxer::create(fd, container);
+    if (muxer == nullptr) {
+        fprintf(stderr, "unable to instantiate muxer for format %d\n", container);
+        return 1;
+    }
     close(fd);
 
     size_t trackCount = extractor->countTracks();
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 40b2392..1ffe801 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -318,7 +318,13 @@
     sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
     CHECK_EQ(composerClient->initCheck(), (status_t)OK);
 
-    const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+    const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
+    if (ids.empty()) {
+        SLOGE("Failed to get ID for any displays\n");
+        return 1;
+    }
+
+    const sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
     CHECK(display != nullptr);
 
     ui::DisplayMode mode;
diff --git a/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
index 201cf02..afc9b6a 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
@@ -144,6 +144,11 @@
             clearDataLengths.push_back(ss.numBytesOfClearData);
             encryptedDataLengths.push_back(ss.numBytesOfEncryptedData);
         }
+        if (in_args.keyId.size() != kBlockSize || in_args.iv.size() != kBlockSize) {
+            android_errorWriteLog(0x534e4554, "244569759");
+            detailedError = "invalid decrypt parameter size";
+            return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE, detailedError);
+        }
         auto res =
                 mSession->decrypt(in_args.keyId.data(), in_args.iv.data(),
                                   srcPtr, static_cast<uint8_t*>(destPtr),
diff --git a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
index 12aa4ea..054eabd 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
@@ -177,7 +177,7 @@
     UNUSED(in_optionalParameters);
 
     KeyRequestType keyRequestType = KeyRequestType::UNKNOWN;
-    std::string defaultUrl("https://default.url");
+    std::string defaultUrl("");
 
     _aidl_return->request = {};
     _aidl_return->requestType = keyRequestType;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index 3a675f6..7bc320d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -206,6 +206,11 @@
         return Void();
     } else if (mode == Mode::AES_CTR) {
         size_t bytesDecrypted;
+        if (keyId.size() != kBlockSize || iv.size() != kBlockSize) {
+            android_errorWriteLog(0x534e4554, "244569759");
+            _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "invalid decrypt parameter size");
+            return Void();
+        }
         Status_V1_2 res = mSession->decrypt(keyId.data(), iv.data(), srcPtr,
                 static_cast<uint8_t*>(destPtr), toVector(subSamples), &bytesDecrypted);
         if (res == Status_V1_2::OK) {
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 97e0b1d..4df8783 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -42,9 +42,14 @@
         mWidth(width), mHeight(height),
         mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
         mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
-        mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
-        mSize(hasData ? (bpp * width * height) : 0),
-        mIccSize(iccSize), mBitDepth(bitDepth) {
+        mRotationAngle(angle), mBytesPerPixel(bpp), mIccSize(iccSize),
+        mBitDepth(bitDepth) {
+            uint32_t multVal;
+            mRowBytes = __builtin_mul_overflow(bpp, width, &multVal) ? 0 : multVal;
+            mSize = __builtin_mul_overflow(multVal, height, &multVal) ? 0 : multVal;
+            if (hasData && (mRowBytes == 0 || mSize == 0)) {
+                ALOGE("Frame rowBytes/size overflow %dx%d bpp %d", width, height, bpp);
+            }
     }
 
     void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
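The VideoFrame constructor change above relies on __builtin_mul_overflow so that any wrapped row-bytes or frame-size computation collapses to 0 rather than producing a silently truncated allocation size. A self-contained sketch of that pattern, using the same 32-bit arithmetic (illustrative only, not code from this change):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Sketch of overflow-checked sizing: any 32-bit overflow yields 0, which the
// caller can treat as "invalid" instead of allocating with a wrapped value.
static uint32_t checkedFrameSize(uint32_t bpp, uint32_t width, uint32_t height) {
    uint32_t rowBytes = 0;
    if (__builtin_mul_overflow(bpp, width, &rowBytes)) return 0;
    uint32_t total = 0;
    if (__builtin_mul_overflow(rowBytes, height, &total)) return 0;
    return total;
}

int main() {
    std::printf("%" PRIu32 "\n", checkedFrameSize(4, 1920, 1080));       // 8294400
    std::printf("%" PRIu32 "\n", checkedFrameSize(4, 0x40000000u, 16));  // 0 (overflow)
    return 0;
}
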
diff --git a/media/codec2/components/aom/C2SoftAomEnc.cpp b/media/codec2/components/aom/C2SoftAomEnc.cpp
index 362d75c..f49c1c4 100644
--- a/media/codec2/components/aom/C2SoftAomEnc.cpp
+++ b/media/codec2/components/aom/C2SoftAomEnc.cpp
@@ -95,7 +95,11 @@
                                                                            LEVEL_AV1_4_1))
                          .withFields({
                                  C2F(mProfileLevel, profile).equalTo(PROFILE_AV1_0),
-                                 C2F(mProfileLevel, level).equalTo(LEVEL_AV1_4_1),
+                                 C2F(mProfileLevel, level)
+                                    .oneOf({LEVEL_AV1_2, LEVEL_AV1_2_1, LEVEL_AV1_2_2,
+                                            LEVEL_AV1_2_3, LEVEL_AV1_3, LEVEL_AV1_3_1,
+                                            LEVEL_AV1_3_2, LEVEL_AV1_3_3, LEVEL_AV1_4,
+                                            LEVEL_AV1_4_1}),
                          })
                          .withSetter(ProfileLevelSetter)
                          .build());
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 953afc5..96a4c4a 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -671,6 +671,9 @@
 void C2SoftAvcDec::resetPlugin() {
     mSignalledOutputEos = false;
     mTimeStart = mTimeEnd = systemTime();
+    if (mOutBlock) {
+        mOutBlock.reset();
+    }
 }
 
 status_t C2SoftAvcDec::deleteDecoder() {
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index e51c511..f0e14d7 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -23,8 +23,8 @@
 
 #include <SimpleC2Component.h>
 #include <C2Config.h>
-#include "libgav1/src/gav1/decoder.h"
-#include "libgav1/src/gav1/decoder_settings.h"
+#include <gav1/decoder.h>
+#include <gav1/decoder_settings.h>
 
 namespace android {
 
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index a27c218..15d6dcd 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -664,6 +664,9 @@
 void C2SoftHevcDec::resetPlugin() {
     mSignalledOutputEos = false;
     mTimeStart = mTimeEnd = systemTime();
+    if (mOutBlock) {
+        mOutBlock.reset();
+    }
 }
 
 status_t C2SoftHevcDec::deleteDecoder() {
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index 9a41910..439323c 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -732,6 +732,9 @@
 void C2SoftMpeg2Dec::resetPlugin() {
     mSignalledOutputEos = false;
     mTimeStart = mTimeEnd = systemTime();
+    if (mOutBlock) {
+        mOutBlock.reset();
+    }
 }
 
 status_t C2SoftMpeg2Dec::deleteDecoder() {
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 54a1d0e..3bf9c48 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -256,7 +256,9 @@
     mFramesConfigured = false;
     mSignalledOutputEos = false;
     mSignalledError = false;
-
+    if (mOutBlock) {
+        mOutBlock.reset();
+    }
     return C2_OK;
 }
 
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 2efc00f..5700e5d 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -59,7 +59,7 @@
 
     addParameter(
         DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
-            .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+            .withDefault(new C2StreamPictureSizeInfo::input(0u, 64, 64))
             .withFields({
                 C2F(mSize, width).inRange(2, 2048, 2),
                 C2F(mSize, height).inRange(2, 2048, 2),
@@ -81,7 +81,7 @@
 
     addParameter(
         DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
-            .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+            .withDefault(new C2StreamFrameRateInfo::output(0u, 1.))
             // TODO: More restriction?
             .withFields({C2F(mFrameRate, value).greaterThan(0.)})
             .withSetter(
@@ -127,10 +127,18 @@
                 C2F(mProfileLevel, profile).equalTo(
                     PROFILE_VP9_0
                 ),
-                C2F(mProfileLevel, level).equalTo(
-                    LEVEL_VP9_4_1),
+                C2F(mProfileLevel, level).oneOf({
+                        C2Config::LEVEL_VP9_1,
+                        C2Config::LEVEL_VP9_1_1,
+                        C2Config::LEVEL_VP9_2,
+                        C2Config::LEVEL_VP9_2_1,
+                        C2Config::LEVEL_VP9_3,
+                        C2Config::LEVEL_VP9_3_1,
+                        C2Config::LEVEL_VP9_4,
+                        C2Config::LEVEL_VP9_4_1,
+                }),
             })
-            .withSetter(ProfileLevelSetter)
+            .withSetter(ProfileLevelSetter, mSize, mFrameRate, mBitrate)
             .build());
 #else
     addParameter(
@@ -144,7 +152,7 @@
                 C2F(mProfileLevel, level).equalTo(
                     LEVEL_UNUSED),
             })
-            .withSetter(ProfileLevelSetter)
+            .withSetter(ProfileLevelSetter, mSize, mFrameRate, mBitrate)
             .build());
 #endif
     addParameter(
@@ -217,14 +225,81 @@
 }
 
 C2R C2SoftVpxEnc::IntfImpl::ProfileLevelSetter(bool mayBlock,
-                                               C2P<C2StreamProfileLevelInfo::output>& me) {
+                                               C2P<C2StreamProfileLevelInfo::output>& me,
+                                               const C2P<C2StreamPictureSizeInfo::input>& size,
+                                               const C2P<C2StreamFrameRateInfo::output>& frameRate,
+                                               const C2P<C2StreamBitrateInfo::output>& bitrate) {
     (void)mayBlock;
+#ifdef VP9
     if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
         me.set().profile = PROFILE_VP9_0;
     }
-    if (!me.F(me.v.level).supportsAtAll(me.v.level)) {
+    struct LevelLimits {
+        C2Config::level_t level;
+        float samplesPerSec;
+        uint64_t samples;
+        uint32_t bitrate;
+        size_t dimension;
+    };
+    constexpr LevelLimits kLimits[] = {
+            {LEVEL_VP9_1, 829440, 36864, 200000, 512},
+            {LEVEL_VP9_1_1, 2764800, 73728, 800000, 768},
+            {LEVEL_VP9_2, 4608000, 122880, 1800000, 960},
+            {LEVEL_VP9_2_1, 9216000, 245760, 3600000, 1344},
+            {LEVEL_VP9_3, 20736000, 552960, 7200000, 2048},
+            {LEVEL_VP9_3_1, 36864000, 983040, 12000000, 2752},
+            {LEVEL_VP9_4, 83558400, 2228224, 18000000, 4160},
+            {LEVEL_VP9_4_1, 160432128, 2228224, 30000000, 4160},
+    };
+
+    uint64_t samples = size.v.width * size.v.height;
+    float samplesPerSec = float(samples) * frameRate.v.value;
+    size_t dimension = std::max(size.v.width, size.v.height);
+
+    // Check if the supplied level meets the samples / bitrate requirements.
+    // If not, update the level with the lowest level meeting the requirements.
+    bool found = false;
+
+    // needsUpdate stays false by default, covering the case where the supplied
+    // level already meets the requirements.
+    bool needsUpdate = false;
+    for (const LevelLimits& limit : kLimits) {
+        if (samples <= limit.samples && samplesPerSec <= limit.samplesPerSec &&
+            bitrate.v.value <= limit.bitrate && dimension <= limit.dimension) {
+            // This is the lowest level that meets the requirements, and if
+            // we haven't seen the supplied level yet, that means we don't
+            // need the update.
+            if (needsUpdate) {
+                ALOGD("Given level %x does not cover current configuration: "
+                        "adjusting to %x",
+                        me.v.level, limit.level);
+                me.set().level = limit.level;
+            }
+            found = true;
+            break;
+        }
+        if (me.v.level == limit.level) {
+            // We break out of the loop when the lowest feasible level is
+            // found. The fact that we're here means that our level doesn't
+            // meet the requirement and needs to be updated.
+            needsUpdate = true;
+        }
+    }
+    if (!found) {
+        // We set to the highest supported level.
         me.set().level = LEVEL_VP9_4_1;
     }
+#else
+    (void)size;
+    (void)frameRate;
+    (void)bitrate;
+    if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
+        me.set().profile = PROFILE_VP8_0;
+    }
+    if (!me.F(me.v.level).supportsAtAll(me.v.level)) {
+        me.set().level = LEVEL_UNUSED;
+    }
+#endif
     return C2R::Ok();
 }
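
As an illustration of the level-selection rule implemented above (not part of the patch), the short standalone sketch below copies the kLimits table and walks the same check for a hypothetical 1920x1080, 30 fps, 4 Mbps configuration, which lands on LEVEL_VP9_4:

    // Standalone sketch of the lowest-feasible-level search in ProfileLevelSetter.
    // The limits are copied from the kLimits table above; the input values are made up.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    struct Limits { const char* name; float samplesPerSec; uint64_t samples; uint32_t bitrate; size_t dimension; };

    int main() {
        const Limits kLimits[] = {
                {"LEVEL_VP9_1",   829440,    36864,   200000,   512},
                {"LEVEL_VP9_1_1", 2764800,   73728,   800000,   768},
                {"LEVEL_VP9_2",   4608000,   122880,  1800000,  960},
                {"LEVEL_VP9_2_1", 9216000,   245760,  3600000,  1344},
                {"LEVEL_VP9_3",   20736000,  552960,  7200000,  2048},
                {"LEVEL_VP9_3_1", 36864000,  983040,  12000000, 2752},
                {"LEVEL_VP9_4",   83558400,  2228224, 18000000, 4160},
                {"LEVEL_VP9_4_1", 160432128, 2228224, 30000000, 4160},
        };
        const uint32_t width = 1920, height = 1080, bitrate = 4000000;
        const float frameRate = 30.f;
        const uint64_t samples = uint64_t(width) * height;       // 2073600
        const float samplesPerSec = float(samples) * frameRate;  // 62208000
        const size_t dimension = std::max(width, height);        // 1920
        for (const Limits& limit : kLimits) {
            if (samples <= limit.samples && samplesPerSec <= limit.samplesPerSec &&
                    bitrate <= limit.bitrate && dimension <= limit.dimension) {
                printf("lowest feasible level: %s\n", limit.name);  // prints LEVEL_VP9_4
                return 0;
            }
        }
        printf("no level covers this configuration; clamp to LEVEL_VP9_4_1\n");
        return 0;
    }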
 
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 714fadb..bfb4444 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -243,9 +243,10 @@
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::input> &oldMe,
                           C2P<C2StreamPictureSizeInfo::input> &me);
 
-    static C2R ProfileLevelSetter(
-            bool mayBlock,
-            C2P<C2StreamProfileLevelInfo::output> &me);
+    static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::output>& me,
+                                  const C2P<C2StreamPictureSizeInfo::input>& size,
+                                  const C2P<C2StreamFrameRateInfo::output>& frameRate,
+                                  const C2P<C2StreamBitrateInfo::output>& bitrate);
 
     static C2R LayeringSetter(bool mayBlock, C2P<C2StreamTemporalLayeringTuning::output>& me);
 
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index b36e80a..524519c 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -85,6 +85,9 @@
         arm64: {
             src: "seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy",
         },
+        riscv64: {
+            src: "seccomp_policy/android.hardware.media.c2@1.2-default-riscv64.policy",
+        },
         x86: {
             src: "seccomp_policy/android.hardware.media.c2@1.2-default-x86.policy",
         },
@@ -96,3 +99,4 @@
     // This may be removed.
     required: ["crash_dump.policy"],
 }
+
diff --git a/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-riscv64.policy b/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-riscv64.policy
new file mode 100644
index 0000000..27f0b95
--- /dev/null
+++ b/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-riscv64.policy
@@ -0,0 +1,75 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+getuid: 1
+getrlimit: 1
+fstat: 1
+newfstatat: 1
+fstatfs: 1
+memfd_create: 1
+ftruncate: 1
+
+mremap: arg3 == 3 || arg3 == MREMAP_MAYMOVE
+munmap: 1
+prctl: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+gettimeofday: 1
+faccessat: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
+madvise: 1
+
+# crash dump policy additions
+clock_gettime: 1
+getpid: 1
+gettid: 1
+pipe2: 1
+recvmsg: 1
+process_vm_readv: 1
+tgkill: 1
+rt_sigaction: 1
+rt_tgsigqueueinfo: 1
+#mprotect: arg2 in 0x1|0x2
+munmap: 1
+#mmap: arg2 in 0x1|0x2
+geteuid: 1
+getgid: 1
+getegid: 1
+getgroups: 1
+
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index c8e9930..6335f13 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -72,7 +72,7 @@
     virtual void getArray(Vector<sp<MediaCodecBuffer>> *) const {}
 
     /**
-     * Return number of buffers the client owns.
+     * Return the number of buffers owned by the client or the component.
      */
     virtual size_t numActiveSlots() const = 0;
 
@@ -595,8 +595,7 @@
     void flush();
 
     /**
-     * Return the number of buffers that are sent to the client but not released
-     * yet.
+     * Return the number of buffers that are sent to the client or the component.
      */
     size_t numActiveSlots() const;
 
@@ -716,8 +715,7 @@
     void grow(size_t newSize, std::function<sp<Codec2Buffer>()> alloc);
 
     /**
-     * Return the number of buffers that are sent to the client but not released
-     * yet.
+     * Return the number of buffers that are sent to the client or the component.
      */
     size_t numActiveSlots() const;
 
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 876c96d..b9270de 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -533,7 +533,7 @@
                         * align(mHeight, 64) / plane.rowSampling;
             }
 
-            if (minPtr == mView.data()[0] && (maxPtr - minPtr + 1) <= planeSize) {
+            if (minPtr == mView.data()[0] && (maxPtr - minPtr) <= planeSize) {
                 // FIXME: this is risky as reading/writing data out of bound results
                 //        in an undefined behavior, but gralloc does assume a
                 //        contiguous mapping
@@ -545,8 +545,7 @@
                     mediaImage->mPlane[i].mHorizSubsampling = plane.colSampling;
                     mediaImage->mPlane[i].mVertSubsampling = plane.rowSampling;
                 }
-                mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr),
-                                       maxPtr - minPtr + 1);
+                mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr), maxPtr - minPtr);
                 ALOGV("Converter: wrapped (capacity=%zu)", mWrapped->capacity());
             }
         }
diff --git a/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
index ef5800d..332d3ac 100644
--- a/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
@@ -38,7 +38,7 @@
            !strcmp(deviceCodeName, "Tiramisu");
 }
 
-bool isVendorApiOrFirstApiAtLeastT() {
+static bool isP010Allowed() {
     // The first SDK the device shipped with.
     static const int32_t kProductFirstApiLevel =
         base::GetIntProperty<int32_t>("ro.product.first_api_level", 0);
@@ -47,6 +47,17 @@
     // to signal which VSR requirements they conform to even if the first device SDK was higher.
     static const int32_t kBoardFirstApiLevel =
         base::GetIntProperty<int32_t>("ro.board.first_api_level", 0);
+
+    // Some devices that launched prior to Android S may not support P010 correctly, even
+    // though they may advertise it as supported.
+    if (kProductFirstApiLevel != 0 && kProductFirstApiLevel < __ANDROID_API_S__) {
+        return false;
+    }
+
+    if (kBoardFirstApiLevel != 0 && kBoardFirstApiLevel < __ANDROID_API_S__) {
+        return false;
+    }
+
     static const int32_t kBoardApiLevel =
         base::GetIntProperty<int32_t>("ro.board.api_level", 0);
 
@@ -67,7 +78,7 @@
     // API alone. For now limit P010 to devices that launched with Android T or known to conform
     // to Android T VSR (as opposed to simply limiting to a T vendor image).
     if (format == (AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010 &&
-            !isVendorApiOrFirstApiAtLeastT()) {
+            !isP010Allowed()) {
         return false;
     }
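
A minimal sketch (not part of the patch) of the launch-API-level gate added above; the property values passed in are made up, and only the two first_api_level checks are modeled:

    // Sketch of the P010 gating rule: a device is excluded when either first-API-level
    // property is set and is lower than Android S (API 31).
    #include <cstdint>
    #include <cstdio>

    static bool firstApiLevelsAllowP010(int32_t productFirstApiLevel, int32_t boardFirstApiLevel) {
        constexpr int32_t kAndroidS = 31;  // __ANDROID_API_S__
        if (productFirstApiLevel != 0 && productFirstApiLevel < kAndroidS) return false;
        if (boardFirstApiLevel != 0 && boardFirstApiLevel < kAndroidS) return false;
        return true;  // the remaining ro.board.api_level checks are omitted in this sketch
    }

    int main() {
        printf("%d\n", firstApiLevelsAllowP010(30, 0));   // 0: product launched with Android R
        printf("%d\n", firstApiLevelsAllowP010(33, 33));  // 1: product and board launched with T
    }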
 
diff --git a/media/janitors/media_solutions_OWNERS b/media/janitors/media_solutions_OWNERS
index 69e3a5e..e0c87f7 100644
--- a/media/janitors/media_solutions_OWNERS
+++ b/media/janitors/media_solutions_OWNERS
@@ -1,10 +1,21 @@
 # Bug component: 1344
 # go/android-fwk-media-solutions for info on areas of ownership.
 
-# Main owners:
+# MediaRouter and native mirroring only:
 adadukin@google.com
 aquilescanta@google.com
+bishoygendy@google.com
 ivanbuper@google.com
 
-# In case of emergency:
-andrewlewis@google.com #{LAST_RESORT_SUGGESTION}
+# MediaMuxer, MediaRecorder, and seamless transcoding only:
+andrewlewis@google.com
+claincly@google.com
+
+# Everything in go/android-fwk-media-solutions not covered above:
+bachinger@google.com
+christosts@google.com
+ibaker@google.com
+michaelkatz@google.com
+rohks@google.com
+tianyifeng@google.com
+tonihei@google.com
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index add28e0..4b417a7 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -36,6 +36,9 @@
     symbol_file: "src/libaaudio.map.txt",
     first_version: "26",
     unversioned_until: "current",
+    export_header_libs: [
+        "libAAudio_headers",
+    ],
 }
 
 cc_library_headers {
diff --git a/media/libaaudio/TEST_MAPPING b/media/libaaudio/TEST_MAPPING
index 9aff137..5d3fb0a 100644
--- a/media/libaaudio/TEST_MAPPING
+++ b/media/libaaudio/TEST_MAPPING
@@ -1,5 +1,5 @@
 {
-  "postsubmit": [
+  "presubmit": [
     {
       "name": "CtsNativeMediaAAudioTestCases",
       "options" : [
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index a727db2..abfd8a7 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -887,6 +887,12 @@
  * will be respected if both this function and {@link AAudioStreamBuilder_setChannelMask} are
  * called.
  *
+ * Note that if the channel count is two, the stream may be mixed down to mono when the device only
+ * supports one channel. If the channel count is greater than two but the device supports fewer
+ * channels than requested, the channels beyond the device's channel count will be dropped. If the
+ * higher channels should be mixed or spatialized, use {@link AAudioStreamBuilder_setChannelMask}
+ * instead.
+ *
  * Available since API level 26.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
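
A hedged usage sketch of the behavior described in the note above (illustrative only; the channel mask value chosen here is an example):

    // Requesting a plain channel count lets the framework drop or mix down channels when
    // the device supports fewer channels; an explicit channel mask is the way to ask for
    // the higher channels to be mixed or spatialized instead.
    #include <aaudio/AAudio.h>

    void configureChannels(AAudioStreamBuilder* builder, bool wantSpatialization) {
        if (wantSpatialization) {
            AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);
        } else {
            AAudioStreamBuilder_setChannelCount(builder, 6);
        }
    }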
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 938079b..9c82424 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -432,7 +432,7 @@
 AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    return audioStream->getState();
+    return audioStream->getStateExternal();
 }
 
 AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream)
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index c9351e0..c31947f 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -411,6 +411,8 @@
         return; // no change, the stream is already disconnected
     }
     mDisconnected.store(true);
+    // Wake up a wakeForStateChange thread if it exists.
+    syscall(SYS_futex, &mState, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0);
     // Track transition to DISCONNECTED state.
     android::mediametrics::LogItem(mMetricsId)
             .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DISCONNECT)
@@ -428,7 +430,7 @@
     }
 
     int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND; // arbitrary
-    aaudio_stream_state_t state = getState();
+    aaudio_stream_state_t state = getStateExternal();
     while (state == currentState && timeoutNanoseconds > 0) {
         if (durationNanos > timeoutNanoseconds) {
             durationNanos = timeoutNanoseconds;
@@ -447,7 +449,7 @@
             return result;
         }
 
-        state = getState();
+        state = getStateExternal();
     }
     if (nextState != nullptr) {
         *nextState = state;
@@ -638,6 +640,13 @@
     doSetVolume(); // apply this change
 }
 
+aaudio_stream_state_t AudioStream::getStateExternal() const {
+    if (isDisconnected()) {
+        return AAUDIO_STREAM_STATE_DISCONNECTED;
+    }
+    return getState();
+}
+
 void AudioStream::MyPlayerBase::registerWithAudioManager(const android::sp<AudioStream>& parent) {
     std::lock_guard<std::mutex> lock(mParentLock);
     mParent = parent;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 50f6aa0..5ddc515 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -194,6 +194,8 @@
         return mState.load();
     }
 
+    aaudio_stream_state_t getStateExternal() const;
+
     virtual int32_t getBufferSize() const {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 04b4325..ac4e2b3 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -24,7 +24,6 @@
 
 #include <aaudio/AAudio.h>
 #include <aaudio/AAudioTesting.h>
-#include <android/media/audio/common/AudioMMapPolicy.h>
 #include <android/media/audio/common/AudioMMapPolicyInfo.h>
 #include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/AudioSystem.h>
@@ -37,10 +36,10 @@
 #include "core/AudioStreamBuilder.h"
 #include "legacy/AudioStreamRecord.h"
 #include "legacy/AudioStreamTrack.h"
+#include "utility/AAudioUtilities.h"
 
 using namespace aaudio;
 
-using android::media::audio::common::AudioMMapPolicy;
 using android::media::audio::common::AudioMMapPolicyInfo;
 using android::media::audio::common::AudioMMapPolicyType;
 
@@ -95,37 +94,6 @@
     return result;
 }
 
-namespace {
-
-aaudio_policy_t aidl2legacy_aaudio_policy(AudioMMapPolicy aidl) {
-    switch (aidl) {
-        case AudioMMapPolicy::NEVER:
-            return AAUDIO_POLICY_NEVER;
-        case AudioMMapPolicy::AUTO:
-            return AAUDIO_POLICY_AUTO;
-        case AudioMMapPolicy::ALWAYS:
-            return AAUDIO_POLICY_ALWAYS;
-        case AudioMMapPolicy::UNSPECIFIED:
-        default:
-            return AAUDIO_UNSPECIFIED;
-    }
-}
-
-// The aaudio policy will be ALWAYS, NEVER, UNSPECIFIED only when all policy info are
-// ALWAYS, NEVER or UNSPECIFIED. Otherwise, the aaudio policy will be AUTO.
-aaudio_policy_t getAAudioPolicy(
-        const std::vector<AudioMMapPolicyInfo>& policyInfos) {
-    if (policyInfos.empty()) return AAUDIO_POLICY_AUTO;
-    for (size_t i = 1; i < policyInfos.size(); ++i) {
-        if (policyInfos.at(i).mmapPolicy != policyInfos.at(0).mmapPolicy) {
-            return AAUDIO_POLICY_AUTO;
-        }
-    }
-    return aidl2legacy_aaudio_policy(policyInfos.at(0).mmapPolicy);
-}
-
-} // namespace
-
 // Try to open using MMAP path if that is allowed.
 // Fall back to Legacy path if MMAP not available.
 // Exact behavior is controlled by MMapPolicy.
@@ -145,12 +113,28 @@
     }
 
     std::vector<AudioMMapPolicyInfo> policyInfos;
-    // The API setting is the highest priority.
     aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
-    // If not specified then get from a system property.
-    if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
-                AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
-        mmapPolicy = getAAudioPolicy(policyInfos);
+    if (android::AudioSystem::getMmapPolicyInfo(
+            AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
+        aaudio_policy_t systemMmapPolicy = AAudio_getAAudioPolicy(policyInfos);
+        if (mmapPolicy == AAUDIO_POLICY_ALWAYS && systemMmapPolicy == AAUDIO_POLICY_NEVER) {
+            // No need to try as AAudioService is not created and the client only wants MMAP path.
+            return AAUDIO_ERROR_NO_SERVICE;
+        }
+        // Use the system property for the MMAP policy if
+        //    1. The API setting does not specify an MMAP policy, or
+        //    2. The system property specifies the MMAP policy as NEVER. In this case,
+        //       AAudioService will not be started, so there is no need to try the MMAP path.
+        if (mmapPolicy == AAUDIO_UNSPECIFIED || systemMmapPolicy == AAUDIO_POLICY_NEVER) {
+            mmapPolicy = systemMmapPolicy;
+        }
+    } else {
+        // If querying the MMAP policy info fails, it is very likely that the AAudioService was
+        // not created. In this case, we don't try the MMAP path.
+        if (mmapPolicy == AAUDIO_POLICY_ALWAYS) {
+            return AAUDIO_ERROR_NO_SERVICE;
+        }
+        mmapPolicy = AAUDIO_POLICY_NEVER;
     }
     // If still not specified then use the default.
     if (mmapPolicy == AAUDIO_UNSPECIFIED) {
@@ -161,7 +145,7 @@
     aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
     if (android::AudioSystem::getMmapPolicyInfo(
             AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
-        mmapExclusivePolicy = getAAudioPolicy(policyInfos);
+        mmapExclusivePolicy = AAudio_getAAudioPolicy(policyInfos);
     }
     if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
         mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
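
A condensed sketch (not the actual implementation) of how the API-level policy and the system policy combine in the hunk above, using the same AAUDIO_POLICY_* constants; the final fallback constant is a stand-in, since the real compiled-in default is not shown in this diff:

    // Returns true when AAUDIO_ERROR_NO_SERVICE should be reported instead of opening a stream.
    #include <aaudio/AAudio.h>
    #include <aaudio/AAudioTesting.h>

    bool resolveMmapPolicy(aaudio_policy_t apiPolicy, aaudio_policy_t systemPolicy,
                           aaudio_policy_t* resolved) {
        if (apiPolicy == AAUDIO_POLICY_ALWAYS && systemPolicy == AAUDIO_POLICY_NEVER) {
            return true;  // the client only wants MMAP but AAudioService will never be started
        }
        if (apiPolicy == AAUDIO_UNSPECIFIED || systemPolicy == AAUDIO_POLICY_NEVER) {
            *resolved = systemPolicy;  // defer to the system property
        } else {
            *resolved = apiPolicy;     // the API setting wins
        }
        if (*resolved == AAUDIO_UNSPECIFIED) {
            *resolved = AAUDIO_POLICY_AUTO;  // stand-in for the compiled-in default
        }
        return false;
    }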
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index a197ced..0afa11b 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -16,24 +16,28 @@
 
 #define LOG_TAG "AAudio"
 //#define LOG_NDEBUG 0
-#include <utils/Log.h>
 
-#include <cutils/properties.h>
+#include <assert.h>
+#include <math.h>
 #include <stdint.h>
+
+#include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <cutils/properties.h>
 #include <sys/types.h>
+#include <system/audio.h>
 #include <utils/Errors.h>
+#include <utils/Log.h>
 
 #include "aaudio/AAudio.h"
 #include "core/AudioGlobal.h"
-#include <aaudio/AAudioTesting.h>
-#include <math.h>
-#include <system/audio.h>
-#include <assert.h>
-
 #include "utility/AAudioUtilities.h"
 
 using namespace android;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+
 status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
     // This covers the case for AAUDIO_OK and for positive results.
     if (result >= 0) {
@@ -638,3 +642,31 @@
     }
     return result;
 }
+
+namespace {
+
+aaudio_policy_t aidl2legacy_aaudio_policy(AudioMMapPolicy aidl) {
+    switch (aidl) {
+        case AudioMMapPolicy::NEVER:
+            return AAUDIO_POLICY_NEVER;
+        case AudioMMapPolicy::AUTO:
+            return AAUDIO_POLICY_AUTO;
+        case AudioMMapPolicy::ALWAYS:
+            return AAUDIO_POLICY_ALWAYS;
+        case AudioMMapPolicy::UNSPECIFIED:
+        default:
+            return AAUDIO_UNSPECIFIED;
+    }
+}
+
+} // namespace
+
+aaudio_policy_t AAudio_getAAudioPolicy(const std::vector<AudioMMapPolicyInfo>& policyInfos) {
+    if (policyInfos.empty()) return AAUDIO_POLICY_AUTO;
+    for (size_t i = 1; i < policyInfos.size(); ++i) {
+        if (policyInfos.at(i).mmapPolicy != policyInfos.at(0).mmapPolicy) {
+            return AAUDIO_POLICY_AUTO;
+        }
+    }
+    return aidl2legacy_aaudio_policy(policyInfos.at(0).mmapPolicy);
+}
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index b59ce1c..ac75306 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -19,14 +19,17 @@
 
 #include <algorithm>
 #include <functional>
+#include <vector>
 #include <stdint.h>
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
 #include <utils/Errors.h>
 #include <system/audio.h>
 
 #include "aaudio/AAudio.h"
+#include "aaudio/AAudioTesting.h"
 
 /**
  * Convert an AAudio result into the closest matching Android status.
@@ -343,4 +346,9 @@
     AAUDIO_CHANNEL_INDEX_MASK_24 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 24) - 1,
 };
 
+// The aaudio policy will be ALWAYS, NEVER or UNSPECIFIED only when all policy infos are
+// ALWAYS, NEVER or UNSPECIFIED, respectively. Otherwise, the aaudio policy will be AUTO.
+aaudio_policy_t AAudio_getAAudioPolicy(
+        const std::vector<android::media::audio::common::AudioMMapPolicyInfo>& policyInfos);
+
 #endif //UTILITY_AAUDIO_UTILITIES_H
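
A small usage sketch of the helper declared above (assumptions: AudioMMapPolicyInfo is default-constructible, as generated AIDL parcelables usually are, and the policy values here are made up):

    // The aggregate policy is AUTO whenever the per-output policies disagree; otherwise it
    // is the converted common value (see the comment above the declaration).
    #include <vector>
    #include <android/media/audio/common/AudioMMapPolicy.h>
    #include <android/media/audio/common/AudioMMapPolicyInfo.h>
    #include "utility/AAudioUtilities.h"

    using android::media::audio::common::AudioMMapPolicy;
    using android::media::audio::common::AudioMMapPolicyInfo;

    aaudio_policy_t exampleAggregatePolicy() {
        std::vector<AudioMMapPolicyInfo> infos(2);
        infos[0].mmapPolicy = AudioMMapPolicy::AUTO;
        infos[1].mmapPolicy = AudioMMapPolicy::NEVER;
        return AAudio_getAAudioPolicy(infos);  // mixed values -> AAUDIO_POLICY_AUTO
    }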
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 4b45909..438be0a 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -214,3 +214,17 @@
     srcs: ["test_disconnect_race.cpp"],
     shared_libs: ["libaaudio"],
 }
+
+cc_test {
+    name: "aaudio_test_mmap_path",
+    defaults: [
+        "libaaudio_tests_defaults",
+    ],
+    srcs: ["test_mmap_path.cpp"],
+    shared_libs: [
+        "libaaudio",
+        "libaaudio_internal",
+        "libaudioclient",
+        "liblog",
+    ],
+}
diff --git a/media/libaaudio/tests/test_mmap_path.cpp b/media/libaaudio/tests/test_mmap_path.cpp
new file mode 100644
index 0000000..c8376f6
--- /dev/null
+++ b/media/libaaudio/tests/test_mmap_path.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "test_mmap_path"
+
+#include <vector>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+#include <android/log.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <media/AudioSystem.h>
+
+#include <gtest/gtest.h>
+
+#include "utility/AAudioUtilities.h"
+
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
+/**
+ * Open a stream via the AAudio API with the performance mode set to LOW_LATENCY. When MMAP is
+ * supported, the stream is expected to use the MMAP path instead of the legacy path. This is
+ * guaranteed on Pixel devices, but may not be guaranteed on other vendors' devices.
+ * @param direction the direction for the stream
+ */
+static void openStreamAndVerify(aaudio_direction_t direction) {
+    std::vector<AudioMMapPolicyInfo> policyInfos;
+    ASSERT_EQ(android::NO_ERROR, android::AudioSystem::getMmapPolicyInfo(
+            AudioMMapPolicyType::DEFAULT, &policyInfos));
+    if (AAudio_getAAudioPolicy(policyInfos) == AAUDIO_POLICY_NEVER) {
+        // Query the system MMAP policy; if it is NEVER, there is no MMAP support and no need to
+        // run the test. The query is done to avoid accidentally running the test on a device
+        // that doesn't support MMAP, such as cuttlefish.
+        ALOGD("Skip test as mmap is not supported");
+        return;
+    }
+
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+
+    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+    AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
+    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+    EXPECT_EQ(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAudioStream_getPerformanceMode(aaudioStream));
+    EXPECT_TRUE(AAudioStream_isMMapUsed(aaudioStream));
+
+    AAudioStream_close(aaudioStream);
+    AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+TEST(test_mmap_path, input) {
+    openStreamAndVerify(AAUDIO_DIRECTION_INPUT);
+}
+
+TEST(test_mmap_path, output) {
+    openStreamAndVerify(AAUDIO_DIRECTION_OUTPUT);
+}
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 1521882..6586e01 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -1039,6 +1039,16 @@
         {
             AUDIO_FORMAT_DRA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DRA)
         },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_ADAPTIVE_QLEA,
+            make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive.r3")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_ADAPTIVE_R4,
+            make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive.r4")
+        },
     }};
     return pairs;
 }
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index b6ddf56..6c198d3 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -48,7 +48,7 @@
 cc_library {
     name: "libaudiopolicy",
     srcs: [
-        "AudioAttributes.cpp",
+        "VolumeGroupAttributes.cpp",
         "AudioPolicy.cpp",
         "AudioProductStrategy.cpp",
         "AudioVolumeGroup.cpp",
diff --git a/media/libaudioclient/AudioPolicy.cpp b/media/libaudioclient/AudioPolicy.cpp
index 4d2b6b1..6bb0cbe 100644
--- a/media/libaudioclient/AudioPolicy.cpp
+++ b/media/libaudioclient/AudioPolicy.cpp
@@ -57,6 +57,10 @@
     case RULE_EXCLUDE_USERID:
         mValue.mUserId = (int) parcel->readInt32();
         break;
+    case RULE_MATCH_AUDIO_SESSION_ID:
+    case RULE_EXCLUDE_AUDIO_SESSION_ID:
+        mValue.mAudioSessionId = (audio_session_t) parcel->readInt32();
+        break;
     default:
         ALOGE("Trying to build AudioMixMatchCriterion from unknown rule %d", mRule);
         return BAD_VALUE;
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index ecd423a..381faf6 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -18,7 +18,7 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 #include <media/AudioProductStrategy.h>
-#include <media/AudioAttributes.h>
+#include <media/VolumeGroupAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
 namespace android {
@@ -42,8 +42,8 @@
     aidl.name = legacy.getName();
     aidl.audioAttributes = VALUE_OR_RETURN(
             convertContainer<std::vector<media::AudioAttributesEx>>(
-                    legacy.getAudioAttributes(),
-                    legacy2aidl_AudioAttributes_AudioAttributesEx));
+                    legacy.getVolumeGroupAttributes(),
+                    legacy2aidl_VolumeGroupAttributes_AudioAttributesEx));
     aidl.id = VALUE_OR_RETURN(legacy2aidl_product_strategy_t_int32_t(legacy.getId()));
     return aidl;
 }
@@ -53,9 +53,9 @@
     return AudioProductStrategy(
             aidl.name,
             VALUE_OR_RETURN(
-                    convertContainer<std::vector<AudioAttributes>>(
+                    convertContainer<std::vector<VolumeGroupAttributes>>(
                             aidl.audioAttributes,
-                            aidl2legacy_AudioAttributesEx_AudioAttributes)),
+                            aidl2legacy_AudioAttributesEx_VolumeGroupAttributes)),
             VALUE_OR_RETURN(aidl2legacy_int32_t_product_strategy_t(aidl.id)));
 }
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 05f27b0..af00ab1 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1336,7 +1336,7 @@
     return result.value_or(PRODUCT_STRATEGY_NONE);
 }
 
-status_t AudioSystem::getDevicesForAttributes(const AudioAttributes& aa,
+status_t AudioSystem::getDevicesForAttributes(const audio_attributes_t& aa,
                                               AudioDeviceTypeAddrVector* devices,
                                               bool forVolume) {
     if (devices == nullptr) {
@@ -1345,8 +1345,8 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
+    media::AudioAttributesInternal aaAidl = VALUE_OR_RETURN_STATUS(
+             legacy2aidl_audio_attributes_t_AudioAttributesInternal(aa));
     std::vector<AudioDevice> retAidl;
     RETURN_STATUS_IF_ERROR(
             statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, forVolume, &retAidl)));
@@ -2093,7 +2093,7 @@
     AudioProductStrategyVector strategies;
     listAudioProductStrategies(strategies);
     for (const auto& strategy : strategies) {
-        auto attrVect = strategy.getAudioAttributes();
+        auto attrVect = strategy.getVolumeGroupAttributes();
         auto iter = std::find_if(begin(attrVect), end(attrVect), [&stream](const auto& attributes) {
             return attributes.getStreamType() == stream;
         });
@@ -2107,7 +2107,7 @@
 
 audio_stream_type_t AudioSystem::attributesToStreamType(const audio_attributes_t& attr) {
     product_strategy_t psId;
-    status_t ret = AudioSystem::getProductStrategyFromAudioAttributes(AudioAttributes(attr), psId);
+    status_t ret = AudioSystem::getProductStrategyFromAudioAttributes(attr, psId);
     if (ret != NO_ERROR) {
         ALOGE("no strategy found for attributes %s", toString(attr).c_str());
         return AUDIO_STREAM_MUSIC;
@@ -2116,7 +2116,7 @@
     listAudioProductStrategies(strategies);
     for (const auto& strategy : strategies) {
         if (strategy.getId() == psId) {
-            auto attrVect = strategy.getAudioAttributes();
+            auto attrVect = strategy.getVolumeGroupAttributes();
             auto iter = std::find_if(begin(attrVect), end(attrVect), [&attr](const auto& refAttr) {
                 return AudioProductStrategy::attributesMatches(
                         refAttr.getAttributes(), attr);
@@ -2137,14 +2137,14 @@
     return AUDIO_STREAM_MUSIC;
 }
 
-status_t AudioSystem::getProductStrategyFromAudioAttributes(const AudioAttributes& aa,
+status_t AudioSystem::getProductStrategyFromAudioAttributes(const audio_attributes_t& aa,
                                                             product_strategy_t& productStrategy,
                                                             bool fallbackOnDefault) {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
+    media::AudioAttributesInternal aaAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_attributes_t_AudioAttributesInternal(aa));
     int32_t productStrategyAidl;
 
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -2167,14 +2167,14 @@
     return OK;
 }
 
-status_t AudioSystem::getVolumeGroupFromAudioAttributes(const AudioAttributes& aa,
+status_t AudioSystem::getVolumeGroupFromAudioAttributes(const audio_attributes_t &aa,
                                                         volume_group_t& volumeGroup,
                                                         bool fallbackOnDefault) {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
+    media::AudioAttributesInternal aaAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_attributes_t_AudioAttributesInternal(aa));
     int32_t volumeGroupAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getVolumeGroupFromAudioAttributes(aaAidl, fallbackOnDefault, &volumeGroupAidl)));
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
index ab95246..978599e 100644
--- a/media/libaudioclient/AudioVolumeGroup.cpp
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -23,7 +23,6 @@
 
 #include <media/AidlConversion.h>
 #include <media/AudioVolumeGroup.h>
-#include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
 namespace android {
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 520f09c..4423eb6 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -158,6 +158,11 @@
                     convertIntegral<int>(UNION_GET(aidl, userId).value()));
             *rule |= RULE_MATCH_USERID;
             return legacy;
+        case media::AudioMixMatchCriterionValue::audioSessionId:
+            legacy.mAudioSessionId = VALUE_OR_RETURN(
+                    aidl2legacy_int32_t_audio_session_t(UNION_GET(aidl, audioSessionId).value()));
+            *rule |= RULE_MATCH_AUDIO_SESSION_ID;
+            return legacy;
     }
     return unexpected(BAD_VALUE);
 }
@@ -185,7 +190,10 @@
         case RULE_MATCH_USERID:
             UNION_SET(aidl, userId, VALUE_OR_RETURN(convertReinterpret<uint32_t>(legacy.mUserId)));
             break;
-
+        case RULE_MATCH_AUDIO_SESSION_ID:
+            UNION_SET(aidl, audioSessionId,
+                VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.mAudioSessionId)));
+            break;
         default:
             return unexpected(BAD_VALUE);
     }
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
index 1aecfc6..d8151f5 100644
--- a/media/libaudioclient/TEST_MAPPING
+++ b/media/libaudioclient/TEST_MAPPING
@@ -23,9 +23,7 @@
     },
     {
       "name": "audiosystem_tests"
-    }
-  ],
-  "postsubmit": [
+    },
     {
       "name": "CtsNativeMediaAAudioTestCases",
       "options" : [
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/VolumeGroupAttributes.cpp
similarity index 73%
rename from media/libaudioclient/AudioAttributes.cpp
rename to media/libaudioclient/VolumeGroupAttributes.cpp
index 260c06c..2de4667 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/VolumeGroupAttributes.cpp
@@ -14,33 +14,33 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AudioAttributes"
+#define LOG_TAG "VolumeGroupAttributes"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
 #include <binder/Parcel.h>
 
 #include <media/AidlConversion.h>
-#include <media/AudioAttributes.h>
+#include <media/VolumeGroupAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
 namespace android {
 
-status_t AudioAttributes::readFromParcel(const Parcel* parcel) {
+status_t VolumeGroupAttributes::readFromParcel(const Parcel* parcel) {
     media::AudioAttributesEx aidl;
     RETURN_STATUS_IF_ERROR(aidl.readFromParcel(parcel));
-    *this = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioAttributesEx_AudioAttributes(aidl));
+    *this = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioAttributesEx_VolumeGroupAttributes(aidl));
     return OK;
 }
 
-status_t AudioAttributes::writeToParcel(Parcel* parcel) const {
+status_t VolumeGroupAttributes::writeToParcel(Parcel* parcel) const {
     media::AudioAttributesEx aidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_AudioAttributes_AudioAttributesEx(*this));
+            legacy2aidl_VolumeGroupAttributes_AudioAttributesEx(*this));
     return aidl.writeToParcel(parcel);
 }
 
 ConversionResult<media::AudioAttributesEx>
-legacy2aidl_AudioAttributes_AudioAttributesEx(const AudioAttributes& legacy) {
+legacy2aidl_VolumeGroupAttributes_AudioAttributesEx(const VolumeGroupAttributes& legacy) {
     media::AudioAttributesEx aidl;
     aidl.attributes = VALUE_OR_RETURN(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(legacy.getAttributes()));
@@ -50,9 +50,9 @@
     return aidl;
 }
 
-ConversionResult<AudioAttributes>
-aidl2legacy_AudioAttributesEx_AudioAttributes(const media::AudioAttributesEx& aidl) {
-    return AudioAttributes(VALUE_OR_RETURN(aidl2legacy_int32_t_volume_group_t(aidl.groupId)),
+ConversionResult<VolumeGroupAttributes>
+aidl2legacy_AudioAttributesEx_VolumeGroupAttributes(const media::AudioAttributesEx& aidl) {
+    return VolumeGroupAttributes(VALUE_OR_RETURN(aidl2legacy_int32_t_volume_group_t(aidl.groupId)),
                            VALUE_OR_RETURN(aidl2legacy_AudioStreamType_audio_stream_type_t(
                                    aidl.streamType)),
                            VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(
diff --git a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
index 921a93a..0f373a2 100644
--- a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
@@ -28,4 +28,6 @@
     /** Interpreted as uid_t. */
     int uid;
     int userId;
+    /** Interpreted as audio_session_t. */
+    int audioSessionId;
 }
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 8ac89a8..24b59bf 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -137,7 +137,7 @@
 
     int /* product_strategy_t */ getStrategyForStream(AudioStreamType stream);
 
-    AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr, boolean forVolume);
+    AudioDevice[] getDevicesForAttributes(in AudioAttributesInternal attr, boolean forVolume);
 
     int /* audio_io_handle_t */ getOutputForEffect(in EffectDescriptor desc);
 
@@ -313,11 +313,11 @@
     boolean isUltrasoundSupported();
 
     AudioProductStrategy[] listAudioProductStrategies();
-    int /* product_strategy_t */ getProductStrategyFromAudioAttributes(in AudioAttributesEx aa,
-                                                                       boolean fallbackOnDefault);
+    int /* product_strategy_t */ getProductStrategyFromAudioAttributes(
+            in AudioAttributesInternal aa, boolean fallbackOnDefault);
 
     AudioVolumeGroup[] listAudioVolumeGroups();
-    int /* volume_group_t */ getVolumeGroupFromAudioAttributes(in AudioAttributesEx aa,
+    int /* volume_group_t */ getVolumeGroupFromAudioAttributes(in AudioAttributesInternal aa,
                                                                boolean fallbackOnDefault);
 
     void setRttEnabled(boolean enabled);
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index cab476e..61f2069 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -34,11 +34,13 @@
 #define RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET (0x1 << 1)
 #define RULE_MATCH_UID                      (0x1 << 2)
 #define RULE_MATCH_USERID                   (0x1 << 3)
+#define RULE_MATCH_AUDIO_SESSION_ID         (0x1 << 4)
 #define RULE_EXCLUDE_ATTRIBUTE_USAGE  (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_USAGE)
 #define RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET \
                                       (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET)
 #define RULE_EXCLUDE_UID              (RULE_EXCLUSION_MASK|RULE_MATCH_UID)
 #define RULE_EXCLUDE_USERID           (RULE_EXCLUSION_MASK|RULE_MATCH_USERID)
+#define RULE_EXCLUDE_AUDIO_SESSION_ID       (RULE_EXCLUSION_MASK|RULE_MATCH_AUDIO_SESSION_ID)
 
 #define MIX_TYPE_INVALID (-1)
 #define MIX_TYPE_PLAYERS 0
@@ -78,6 +80,7 @@
         audio_source_t  mSource;
         uid_t           mUid;
         int        mUserId;
+        audio_session_t  mAudioSessionId;
     } mValue;
     uint32_t        mRule;
 };
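
A hedged sketch (not from the patch; it assumes AudioMixMatchCriterion is default-constructible and uses the field names shown above) of populating a criterion keyed on an audio session id with the new rule:

    #include <media/AudioPolicy.h>
    #include <system/audio.h>

    // Build a criterion that matches (or, with RULE_EXCLUDE_AUDIO_SESSION_ID, excludes)
    // a specific audio session when evaluating an AudioMix.
    android::AudioMixMatchCriterion makeSessionCriterion(audio_session_t sessionId) {
        android::AudioMixMatchCriterion criterion;
        criterion.mRule = RULE_MATCH_AUDIO_SESSION_ID;
        criterion.mValue.mAudioSessionId = sessionId;
        return criterion;
    }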
diff --git a/media/libaudioclient/include/media/AudioProductStrategy.h b/media/libaudioclient/include/media/AudioProductStrategy.h
index b55b506..7bcb5aa 100644
--- a/media/libaudioclient/include/media/AudioProductStrategy.h
+++ b/media/libaudioclient/include/media/AudioProductStrategy.h
@@ -20,7 +20,7 @@
 #include <android/media/AudioProductStrategy.h>
 #include <media/AidlConversionUtil.h>
 #include <media/AudioCommonTypes.h>
-#include <media/AudioAttributes.h>
+#include <media/VolumeGroupAttributes.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
 #include <binder/Parcelable.h>
@@ -31,12 +31,15 @@
 {
 public:
     AudioProductStrategy() {}
-    AudioProductStrategy(const std::string &name, const std::vector<AudioAttributes> &attributes,
+    AudioProductStrategy(const std::string &name,
+                         const std::vector<VolumeGroupAttributes> &attributes,
                          product_strategy_t id) :
-        mName(name), mAudioAttributes(attributes), mId(id) {}
+        mName(name), mVolumeGroupAttributes(attributes), mId(id) {}
 
     const std::string &getName() const { return mName; }
-    std::vector<AudioAttributes> getAudioAttributes() const { return mAudioAttributes; }
+    std::vector<VolumeGroupAttributes> getVolumeGroupAttributes() const {
+        return mVolumeGroupAttributes;
+    }
     product_strategy_t getId() const { return mId; }
 
     status_t readFromParcel(const Parcel *parcel) override;
@@ -58,7 +61,7 @@
                                   const audio_attributes_t clientAttritubes);
 private:
     std::string mName;
-    std::vector<AudioAttributes> mAudioAttributes;
+    std::vector<VolumeGroupAttributes> mVolumeGroupAttributes;
     product_strategy_t mId;
 };
 
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 1c414ec..6e6b9c8 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -370,7 +370,7 @@
     static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
 
     static product_strategy_t getStrategyForStream(audio_stream_type_t stream);
-    static status_t getDevicesForAttributes(const AudioAttributes &aa,
+    static status_t getDevicesForAttributes(const audio_attributes_t &aa,
                                             AudioDeviceTypeAddrVector *devices,
                                             bool forVolume);
 
@@ -494,7 +494,7 @@
 
     static status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
     static status_t getProductStrategyFromAudioAttributes(
-            const AudioAttributes &aa, product_strategy_t &productStrategy,
+            const audio_attributes_t &aa, product_strategy_t &productStrategy,
             bool fallbackOnDefault = true);
 
     static audio_attributes_t streamTypeToAttributes(audio_stream_type_t stream);
@@ -503,7 +503,8 @@
     static status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups);
 
     static status_t getVolumeGroupFromAudioAttributes(
-            const AudioAttributes &aa, volume_group_t &volumeGroup, bool fallbackOnDefault = true);
+            const audio_attributes_t &aa, volume_group_t &volumeGroup,
+            bool fallbackOnDefault = true);
 
     static status_t setRttEnabled(bool enabled);
 
diff --git a/media/libaudioclient/include/media/AudioAttributes.h b/media/libaudioclient/include/media/VolumeGroupAttributes.h
similarity index 74%
rename from media/libaudioclient/include/media/AudioAttributes.h
rename to media/libaudioclient/include/media/VolumeGroupAttributes.h
index 24bd179..0859995 100644
--- a/media/libaudioclient/include/media/AudioAttributes.h
+++ b/media/libaudioclient/include/media/VolumeGroupAttributes.h
@@ -26,15 +26,20 @@
 
 namespace android {
 
-class AudioAttributes : public Parcelable
+class VolumeGroupAttributes : public Parcelable
 {
 public:
-    AudioAttributes() = default;
-    AudioAttributes(const audio_attributes_t &attributes) : mAttributes(attributes) {} // NOLINT
-    AudioAttributes(volume_group_t groupId,
+    VolumeGroupAttributes() = default;
+    VolumeGroupAttributes(const audio_attributes_t &attributes)
+        : mAttributes(attributes) {} // NOLINT
+    VolumeGroupAttributes(volume_group_t groupId,
                     audio_stream_type_t stream,
                     const audio_attributes_t &attributes) :
-         mAttributes(attributes), mStreamType(stream), mGroupId(groupId) {}
+         mAttributes(attributes), mStreamType(stream), mGroupId(groupId) {
+        // TODO: align native & JAVA source initializer.
+        // As far as this class concerns attributes for volume group, it applies only to playback.
+        mAttributes.source = AUDIO_SOURCE_INVALID;
+    }
 
     audio_attributes_t getAttributes() const { return mAttributes; }
 
@@ -61,8 +66,8 @@
 
 // AIDL conversion routines.
 ConversionResult<media::AudioAttributesEx>
-legacy2aidl_AudioAttributes_AudioAttributesEx(const AudioAttributes& legacy);
-ConversionResult<AudioAttributes>
-aidl2legacy_AudioAttributesEx_AudioAttributes(const media::AudioAttributesEx& aidl);
+legacy2aidl_VolumeGroupAttributes_AudioAttributesEx(const VolumeGroupAttributes& legacy);
+ConversionResult<VolumeGroupAttributes>
+aidl2legacy_AudioAttributesEx_VolumeGroupAttributes(const media::AudioAttributesEx& aidl);
 
 } // namespace android
diff --git a/media/libaudioclient/tests/audio_test_utils.cpp b/media/libaudioclient/tests/audio_test_utils.cpp
index 44f0f50..850eb34 100644
--- a/media/libaudioclient/tests/audio_test_utils.cpp
+++ b/media/libaudioclient/tests/audio_test_utils.cpp
@@ -200,7 +200,7 @@
 status_t AudioPlayback::waitForConsumption(bool testSeek) {
     if (PLAY_STARTED != mState) return INVALID_OPERATION;
     // in static buffer mode, lets not play clips with duration > 30 sec
-    int retry = 30;
+    int retry = 300;
     // Total number of frames in the input file.
     size_t totalFrameCount = mMemCapacity / mTrack->frameSize();
     while (!mStopPlaying && retry > 0) {
@@ -227,7 +227,7 @@
             if (bufferPosition != setPosition) return BAD_VALUE;
             mTrack->start();
         }
-        std::this_thread::sleep_for(std::chrono::milliseconds(300));
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
         retry--;
     }
     if (!mStopPlaying) return TIMED_OUT;
diff --git a/media/libaudioclient/tests/audioclient_serialization_tests.cpp b/media/libaudioclient/tests/audioclient_serialization_tests.cpp
index ef8500b..d1e3d16 100644
--- a/media/libaudioclient/tests/audioclient_serialization_tests.cpp
+++ b/media/libaudioclient/tests/audioclient_serialization_tests.cpp
@@ -66,16 +66,16 @@
                  decltype(audio_stream_type_from_string)>(xsdc_enum_range<xsd::AudioStreamType>{},
                                                           audio_stream_type_from_string);
 
-static const std::vector<uint32_t> kMixMatchRules = {
-        RULE_MATCH_ATTRIBUTE_USAGE,
-        RULE_EXCLUDE_ATTRIBUTE_USAGE,
-        RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET,
-        RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET,
-        RULE_MATCH_UID,
-        RULE_EXCLUDE_UID,
-        RULE_MATCH_USERID,
-        RULE_EXCLUDE_USERID,
-};
+static const std::vector<uint32_t> kMixMatchRules = {RULE_MATCH_ATTRIBUTE_USAGE,
+                                                     RULE_EXCLUDE_ATTRIBUTE_USAGE,
+                                                     RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET,
+                                                     RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET,
+                                                     RULE_MATCH_UID,
+                                                     RULE_EXCLUDE_UID,
+                                                     RULE_MATCH_USERID,
+                                                     RULE_EXCLUDE_USERID,
+                                                     RULE_MATCH_AUDIO_SESSION_ID,
+                                                     RULE_EXCLUDE_AUDIO_SESSION_ID};
 
 // Generates a random string.
 std::string CreateRandomString(size_t n) {
@@ -119,16 +119,17 @@
 TEST_F(SerializationTest, AudioProductStrategyBinderization) {
     for (int j = 0; j < 512; j++) {
         const std::string name{"Test APSBinderization for seed::" + std::to_string(mSeed)};
-        std::vector<AudioAttributes> audioattributesvector;
+        std::vector<VolumeGroupAttributes> volumeGroupAttrVector;
         for (auto i = 0; i < 16; i++) {
             audio_attributes_t attributes;
             fillAudioAttributes(attributes);
-            AudioAttributes audioattributes{static_cast<volume_group_t>(rand()),
-                                            kStreamtypes[rand() % kStreamtypes.size()], attributes};
-            audioattributesvector.push_back(audioattributes);
+            VolumeGroupAttributes volumeGroupAttr{static_cast<volume_group_t>(rand()),
+                                                  kStreamtypes[rand() % kStreamtypes.size()],
+                                                  attributes};
+            volumeGroupAttrVector.push_back(volumeGroupAttr);
         }
         product_strategy_t psId = static_cast<product_strategy_t>(rand());
-        AudioProductStrategy aps{name, audioattributesvector, psId};
+        AudioProductStrategy aps{name, volumeGroupAttrVector, psId};
 
         Parcel p;
         EXPECT_EQ(NO_ERROR, aps.writeToParcel(&p)) << name;
@@ -138,12 +139,12 @@
         EXPECT_EQ(NO_ERROR, apsCopy.readFromParcel(&p)) << name;
         EXPECT_EQ(apsCopy.getName(), name) << name;
         EXPECT_EQ(apsCopy.getId(), psId) << name;
-        auto avec = apsCopy.getAudioAttributes();
-        EXPECT_EQ(avec.size(), audioattributesvector.size()) << name;
-        for (int i = 0; i < audioattributesvector.size(); i++) {
-            EXPECT_EQ(avec[i].getGroupId(), audioattributesvector[i].getGroupId()) << name;
-            EXPECT_EQ(avec[i].getStreamType(), audioattributesvector[i].getStreamType()) << name;
-            EXPECT_TRUE(avec[i].getAttributes() == audioattributesvector[i].getAttributes())
+        auto avec = apsCopy.getVolumeGroupAttributes();
+        EXPECT_EQ(avec.size(), volumeGroupAttrVector.size()) << name;
+        for (int i = 0; i < volumeGroupAttrVector.size(); i++) {
+            EXPECT_EQ(avec[i].getGroupId(), volumeGroupAttrVector[i].getGroupId()) << name;
+            EXPECT_EQ(avec[i].getStreamType(), volumeGroupAttrVector[i].getStreamType()) << name;
+            EXPECT_TRUE(avec[i].getAttributes() == volumeGroupAttrVector[i].getAttributes())
                     << name;
         }
     }
@@ -293,17 +294,17 @@
     audio_stream_type_t stream = mAudioStream;
     audio_attributes_t attributes;
     fillAudioAttributes(attributes);
-    AudioAttributes audioattributes{groupId, stream, attributes};
+    VolumeGroupAttributes volumeGroupAttr{groupId, stream, attributes};
 
     Parcel p;
-    EXPECT_EQ(NO_ERROR, audioattributes.writeToParcel(&p)) << msg;
+    EXPECT_EQ(NO_ERROR, volumeGroupAttr.writeToParcel(&p)) << msg;
 
-    AudioAttributes audioattributesCopy;
+    VolumeGroupAttributes volumeGroupAttrCopy;
     p.setDataPosition(0);
-    EXPECT_EQ(NO_ERROR, audioattributesCopy.readFromParcel(&p)) << msg;
-    EXPECT_EQ(audioattributesCopy.getGroupId(), audioattributes.getGroupId()) << msg;
-    EXPECT_EQ(audioattributesCopy.getStreamType(), audioattributes.getStreamType()) << msg;
-    EXPECT_TRUE(audioattributesCopy.getAttributes() == attributes) << msg;
+    EXPECT_EQ(NO_ERROR, volumeGroupAttrCopy.readFromParcel(&p)) << msg;
+    EXPECT_EQ(volumeGroupAttrCopy.getGroupId(), volumeGroupAttr.getGroupId()) << msg;
+    EXPECT_EQ(volumeGroupAttrCopy.getStreamType(), volumeGroupAttr.getStreamType()) << msg;
+    EXPECT_TRUE(volumeGroupAttrCopy.getAttributes() == attributes) << msg;
 }
 
 // audioStream
diff --git a/media/libaudioclient/tests/audiorouting_tests.cpp b/media/libaudioclient/tests/audiorouting_tests.cpp
index 445633b..4bd81c8 100644
--- a/media/libaudioclient/tests/audiorouting_tests.cpp
+++ b/media/libaudioclient/tests/audiorouting_tests.cpp
@@ -26,31 +26,18 @@
 
 // UNIT TEST
 TEST(AudioTrackTest, TestPerformanceMode) {
-    std::vector<std::string> attachedDevices;
-    std::vector<MixPort> mixPorts;
-    std::vector<Route> routes;
-    EXPECT_EQ(OK, parse_audio_policy_configuration_xml(attachedDevices, mixPorts, routes));
-    std::string output_flags_string[] = {"AUDIO_OUTPUT_FLAG_FAST", "AUDIO_OUTPUT_FLAG_DEEP_BUFFER"};
+    std::vector<struct audio_port_v7> ports;
+    ASSERT_EQ(OK, listAudioPorts(ports));
     audio_output_flags_t output_flags[] = {AUDIO_OUTPUT_FLAG_FAST, AUDIO_OUTPUT_FLAG_DEEP_BUFFER};
     audio_flags_mask_t flags[] = {AUDIO_FLAG_LOW_LATENCY, AUDIO_FLAG_DEEP_BUFFER};
     bool hasFlag = false;
     for (int i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
         hasFlag = false;
-        for (int j = 0; j < mixPorts.size() && !hasFlag; j++) {
-            MixPort port = mixPorts[j];
-            if (port.role == "source" && port.flags.find(output_flags_string[i]) != -1) {
-                for (int k = 0; k < routes.size() && !hasFlag; k++) {
-                    if (routes[k].sources.find(port.name) != -1 &&
-                        std::find(attachedDevices.begin(), attachedDevices.end(), routes[k].sink) !=
-                                attachedDevices.end()) {
-                        hasFlag = true;
-                        std::cerr << "found port with flag " << output_flags_string[i] << "@ "
-                                  << " port :: name : " << port.name << " role : " << port.role
-                                  << " port :: flags : " << port.flags
-                                  << " connected via route name : " << routes[k].name
-                                  << " route sources : " << routes[k].sources
-                                  << " route sink : " << routes[k].sink << std::endl;
-                    }
+        for (const auto& port : ports) {
+            if (port.role == AUDIO_PORT_ROLE_SOURCE && port.type == AUDIO_PORT_TYPE_MIX) {
+                if ((port.active_config.flags.output & output_flags[i]) != 0) {
+                    hasFlag = true;
+                    break;
                 }
             }
         }
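
The rewritten check boils down to a simple predicate over the queried ports; a standalone sketch (struct fields exactly as used in the test above, helper name hypothetical):

    #include <vector>
    #include <system/audio.h>

    // True if any mix port acting as a source currently advertises the given output flag.
    bool anyMixSourceHasOutputFlag(const std::vector<struct audio_port_v7>& ports,
                                   audio_output_flags_t flag) {
        for (const auto& port : ports) {
            if (port.role == AUDIO_PORT_ROLE_SOURCE && port.type == AUDIO_PORT_TYPE_MIX &&
                (port.active_config.flags.output & flag) != 0) {
                return true;
            }
        }
        return false;
    }
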
diff --git a/media/libaudioclient/tests/audiosystem_tests.cpp b/media/libaudioclient/tests/audiosystem_tests.cpp
index aed847c..3dd2c95 100644
--- a/media/libaudioclient/tests/audiosystem_tests.cpp
+++ b/media/libaudioclient/tests/audiosystem_tests.cpp
@@ -332,7 +332,7 @@
 
 bool isPublicStrategy(const AudioProductStrategy& strategy) {
     bool result = true;
-    for (auto& attribute : strategy.getAudioAttributes()) {
+    for (auto& attribute : strategy.getVolumeGroupAttributes()) {
         if (attribute.getAttributes() == AUDIO_ATTRIBUTES_INITIALIZER &&
             (uint32_t(attribute.getStreamType()) >= AUDIO_STREAM_PUBLIC_CNT)) {
             result = false;
@@ -371,7 +371,7 @@
     for (const auto& strategy : strategies) {
         if (!isPublicStrategy(strategy)) continue;
 
-        for (const auto& att : strategy.getAudioAttributes()) {
+        for (const auto& att : strategy.getVolumeGroupAttributes()) {
             if (strategy.attributesMatches(att.getAttributes(), attributes)) {
                 hasStrategyForMedia = true;
                 mediaStrategy = strategy;
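
The same accessor rename drives strategy selection; a condensed sketch of the lookup performed in the test above (header path assumed, isPublicStrategy is the helper shown earlier):

    #include <vector>
    #include <media/AudioProductStrategy.h>   // assumed header location

    const android::AudioProductStrategy* findPublicStrategyFor(
            const std::vector<android::AudioProductStrategy>& strategies,
            const audio_attributes_t& attributes) {
        for (const auto& strategy : strategies) {
            if (!isPublicStrategy(strategy)) continue;
            for (const auto& att : strategy.getVolumeGroupAttributes()) {
                if (strategy.attributesMatches(att.getAttributes(), attributes)) {
                    return &strategy;
                }
            }
        }
        return nullptr;
    }
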
diff --git a/media/libaudiofoundation/TEST_MAPPING b/media/libaudiofoundation/TEST_MAPPING
index dbae9a0..a4e271e 100644
--- a/media/libaudiofoundation/TEST_MAPPING
+++ b/media/libaudiofoundation/TEST_MAPPING
@@ -2,9 +2,7 @@
   "presubmit": [
     {
       "name": "audiofoundation_parcelable_test"
-    }
-  ],
-  "postsubmit": [
+    },
     {
       "name": "CtsNativeMediaAAudioTestCases",
       "options" : [
diff --git a/media/libaudiohal/TEST_MAPPING b/media/libaudiohal/TEST_MAPPING
index 9aff137..5d3fb0a 100644
--- a/media/libaudiohal/TEST_MAPPING
+++ b/media/libaudiohal/TEST_MAPPING
@@ -1,5 +1,5 @@
 {
-  "postsubmit": [
+  "presubmit": [
     {
       "name": "CtsNativeMediaAAudioTestCases",
       "options" : [
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 8743c04..3956a6c 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -17,11 +17,16 @@
 #define LOG_TAG "EffectHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <android-base/stringprintf.h>
 #include <common/all-versions/VersionUtils.h>
 #include <cutils/native_handle.h>
+#include <cutils/properties.h>
 #include <hwbinder/IPCThreadState.h>
 #include <media/EffectsFactoryApi.h>
+#include <mediautils/SchedulingPolicyService.h>
 #include <mediautils/TimeCheck.h>
+#include <system/audio_effects/effect_spatializer.h>
 #include <utils/Log.h>
 
 #include <util/EffectUtils.h>
@@ -50,6 +55,18 @@
     effect_descriptor_t halDescriptor{};
     if (EffectHalHidl::getDescriptor(&halDescriptor) == NO_ERROR) {
         mIsInput = (halDescriptor.flags & EFFECT_FLAG_TYPE_PRE_PROC) == EFFECT_FLAG_TYPE_PRE_PROC;
+        const bool isSpatializer =
+                memcmp(&halDescriptor.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0;
+        if (isSpatializer) {
+            constexpr int32_t kRTPriorityMin = 1;
+            constexpr int32_t kRTPriorityMax = 3;
+            const int32_t priorityBoost = property_get_int32("audio.spatializer.priority", 1);
+            if (priorityBoost >= kRTPriorityMin && priorityBoost <= kRTPriorityMax) {
+                ALOGD("%s: audio.spatializer.priority %d on effect %lld",
+                         __func__, priorityBoost, (long long)effectId);
+                mHalThreadPriority = priorityBoost;
+            }
+        }
     }
 }
 
@@ -127,6 +144,8 @@
         ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
         return NO_INIT;
     }
+
+    (void)checkHalThreadPriority();
     mStatusMQ = std::move(tempStatusMQ);
     return OK;
 }
@@ -317,5 +336,67 @@
     return result;
 }
 
+status_t EffectHalHidl::getHalPid(pid_t *pid) const {
+    using ::android::hidl::base::V1_0::DebugInfo;
+    using ::android::hidl::manager::V1_0::IServiceManager;
+    DebugInfo debugInfo;
+    const auto ret = mEffect->getDebugInfo([&] (const auto &info) {
+        debugInfo = info;
+    });
+    if (!ret.isOk()) {
+        ALOGW("%s: cannot get effect debug info", __func__);
+        return INVALID_OPERATION;
+    }
+    if (debugInfo.pid != (int)IServiceManager::PidConstant::NO_PID) {
+        *pid = debugInfo.pid;
+        return NO_ERROR;
+    }
+    ALOGW("%s: effect debug info does not contain pid", __func__);
+    return NAME_NOT_FOUND;
+}
+
+status_t EffectHalHidl::getHalWorkerTid(pid_t *tid) {
+    int32_t reply = -1;
+    uint32_t replySize = sizeof(reply);
+    const status_t status =
+            command('gtid', 0 /* cmdSize */, nullptr /* pCmdData */, &replySize, &reply);
+    if (status == OK) {
+        *tid = (pid_t)reply;
+    } else {
+        ALOGW("%s: failed with status:%d", __func__, status);
+    }
+    return status;
+}
+
+bool EffectHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
+    if (mHalThreadPriority == kRTPriorityDisabled) {
+        return true;
+    }
+    const int err = requestPriority(
+            threadPid, threadId,
+            mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
+    ALOGW_IF(err, "%s: failed to set RT priority %d for pid %d tid %d; error %d",
+            __func__, mHalThreadPriority, threadPid, threadId, err);
+    // Audio will still work, but may be more susceptible to glitches.
+    return err == 0;
+}
+
+status_t EffectHalHidl::checkHalThreadPriority() {
+    if (mHalThreadPriority == kRTPriorityDisabled) return OK;
+    if (mHalThreadPriority < kRTPriorityMin
+            || mHalThreadPriority > kRTPriorityMax) return BAD_VALUE;
+
+    pid_t halPid, halWorkerTid;
+    const status_t status = getHalPid(&halPid) ?: getHalWorkerTid(&halWorkerTid);
+    const bool success = status == OK && requestHalThreadPriority(halPid, halWorkerTid);
+    ALOGD("%s: effectId %lld RT priority(%d) request %s%s",
+            __func__, (long long)mEffectId, mHalThreadPriority,
+            success ? "succeeded" : "failed",
+            status == OK
+                    ? base::StringPrintf(" for pid:%d tid:%d", halPid, halWorkerTid).c_str()
+                    : " (pid / tid cannot be read)");
+    return success ? OK : status != OK ? status : INVALID_OPERATION /* request failed */;
+}
+
 } // namespace effect
 } // namespace android
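
Condensed, the new boost path reads a sysprop, accepts only priorities 1..3, and asks the scheduling policy service to promote the HAL worker thread. A sketch using the same calls (helper name hypothetical):

    #include <cutils/properties.h>
    #include <mediautils/SchedulingPolicyService.h>

    // Returns true if the RT priority request was sent and accepted.
    bool maybeBoostSpatializerHalThread(pid_t halPid, pid_t halTid) {
        constexpr int32_t kRTPriorityMin = 1;
        constexpr int32_t kRTPriorityMax = 3;
        const int32_t prio = property_get_int32("audio.spatializer.priority", 1);
        if (prio < kRTPriorityMin || prio > kRTPriorityMax) return false;  // boost disabled
        // Asynchronous request; audio keeps working (with more glitch risk) if it fails.
        return android::requestPriority(halPid, halTid, prio,
                                        false /* isForApp */, true /* asynchronous */) == 0;
    }
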
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index e139768..94dcd7e 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -78,6 +78,11 @@
     std::unique_ptr<StatusMQ> mStatusMQ;
     EventFlag* mEfGroup;
     bool mIsInput = false;
+    static constexpr int32_t kRTPriorityMin = 1;
+    static constexpr int32_t kRTPriorityMax = 3;
+    static constexpr int kRTPriorityDisabled = 0;
+    // Typical RealTime mHalThreadPriority ranges from 1 (low) to 3 (high).
+    int mHalThreadPriority = kRTPriorityDisabled;
 
     // Can not be constructed directly by clients.
     EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
@@ -93,6 +98,10 @@
             uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
             uint32_t *replySize, void *pReplyData);
     status_t setProcessBuffers();
+    status_t getHalPid(pid_t *pid) const;
+    status_t getHalWorkerTid(pid_t *tid);
+    bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
+    status_t checkHalThreadPriority();
 };
 
 } // namespace effect
diff --git a/media/libaudioprocessing/TEST_MAPPING b/media/libaudioprocessing/TEST_MAPPING
index 9aff137..5d3fb0a 100644
--- a/media/libaudioprocessing/TEST_MAPPING
+++ b/media/libaudioprocessing/TEST_MAPPING
@@ -1,5 +1,5 @@
 {
-  "postsubmit": [
+  "presubmit": [
     {
       "name": "CtsNativeMediaAAudioTestCases",
       "options" : [
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index 03ce329..ba511fe 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -46,10 +46,9 @@
     shared_libs: [
         "libaudioutils",
         "libbase",
-        "libbinder",
         "liblog",
         "libutils",
-        "libvibrator",
+        "libvibratorutils",
     ],
 
     relative_install_path: "soundfx",
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
new file mode 100644
index 0000000..2defa4e
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BundleContext"
+#include <Utils.h>
+
+#include "BundleContext.h"
+#include "BundleTypes.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+RetCode BundleContext::init() {
+    // init with pre-defined preset NORMAL
+    for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+        mBandGaindB[i] = lvm::kSoftPresets[0 /* normal */][i];
+    }
+
+    // allocate lvm instance
+    LVM_ReturnStatus_en status;
+    LVM_InstParams_t params = {.BufferMode = LVM_UNMANAGED_BUFFERS,
+                               .MaxBlockSize = lvm::MAX_CALL_SIZE,
+                               .EQNB_NumBands = lvm::MAX_NUM_BANDS,
+                               .PSA_Included = LVM_PSA_ON};
+    status = LVM_GetInstanceHandle(&mInstance, &params);
+    GOTO_IF_LVM_ERROR(status, deinit, "LVM_GetInstanceHandleFailed");
+
+    // set control
+    LVM_ControlParams_t controlParams;
+    initControlParameter(controlParams);
+    status = LVM_SetControlParameters(mInstance, &controlParams);
+    GOTO_IF_LVM_ERROR(status, deinit, "LVM_SetControlParametersFailed");
+
+    /* Set the headroom parameters */
+    LVM_HeadroomParams_t headroomParams;
+    initHeadroomParameter(headroomParams);
+    status = LVM_SetHeadroomParams(mInstance, &headroomParams);
+    GOTO_IF_LVM_ERROR(status, deinit, "LVM_SetHeadroomParamsFailed");
+
+    return RetCode::SUCCESS;
+
+deinit:
+    deInit();
+    return RetCode::ERROR_EFFECT_LIB_ERROR;
+}
+
+void BundleContext::deInit() {
+    if (mInstance) {
+        LVM_DelInstanceHandle(&mInstance);
+        mInstance = nullptr;
+    }
+}
+
+RetCode BundleContext::enable() {
+    LVM_ControlParams_t params;
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
+    if (mType == lvm::BundleEffectType::EQUALIZER) {
+        LOG(DEBUG) << __func__ << " enable bundle EQ";
+        params.EQNB_OperatingMode = LVM_EQNB_ON;
+    }
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
+    mEnabled = true;
+    // LvmEffect_limitLevel(pContext);
+    return RetCode::SUCCESS;
+}
+
+RetCode BundleContext::disable() {
+    LVM_ControlParams_t params;
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
+    if (mType == lvm::BundleEffectType::EQUALIZER) {
+        LOG(DEBUG) << __func__ << " disable bundle EQ";
+        params.EQNB_OperatingMode = LVM_EQNB_OFF;
+    }
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
+    mEnabled = false;
+    // LvmEffect_limitLevel(pContext);
+    return RetCode::SUCCESS;
+}
+
+LVM_INT16 BundleContext::LVC_ToDB_s32Tos16(LVM_INT32 Lin_fix) const {
+    LVM_INT16 db_fix;
+    LVM_INT16 Shift;
+    LVM_INT16 SmallRemainder;
+    LVM_UINT32 Remainder = (LVM_UINT32)Lin_fix;
+
+    /* Count leading bits, 1 cycle in assembly*/
+    for (Shift = 0; Shift < 32; Shift++) {
+        if ((Remainder & 0x80000000U) != 0) {
+            break;
+        }
+        Remainder = Remainder << 1;
+    }
+
+    /*
+     * Based on the approximation equation (for Q11.4 format):
+     *
+     * dB = -96 * Shift + 16 * (8 * Remainder - 2 * Remainder^2)
+     */
+    db_fix = (LVM_INT16)(-96 * Shift); /* Six dB steps in Q11.4 format*/
+    SmallRemainder = (LVM_INT16)((Remainder & 0x7fffffff) >> 24);
+    db_fix = (LVM_INT16)(db_fix + SmallRemainder);
+    SmallRemainder = (LVM_INT16)(SmallRemainder * SmallRemainder);
+    db_fix = (LVM_INT16)(db_fix - (LVM_INT16)((LVM_UINT16)SmallRemainder >> 9));
+
+    /* Correct for small offset */
+    db_fix = (LVM_INT16)(db_fix - 5);
+
+    return db_fix;
+}
+
+// TODO: replace with more generic approach, like: audio_utils_power_from_amplitude
+int16_t BundleContext::VolToDb(uint32_t vol) const {
+    int16_t dB;
+
+    dB = LVC_ToDB_s32Tos16(vol << 7);
+    dB = (dB + 8) >> 4;
+    dB = (dB < -96) ? -96 : dB;
+
+    return dB;
+}
+
+RetCode BundleContext::setVolumeStereo(const Parameter::VolumeStereo& volume) {
+    LVM_ControlParams_t params;
+    LVM_ReturnStatus_en status = LVM_SUCCESS;
+
+    // Convert volume to dB
+    int leftdB = VolToDb(volume.left);
+    int rightdB = VolToDb(volume.right);
+    int maxdB = std::max(leftdB, rightdB);
+    int pandB = rightdB - leftdB;
+    // TODO: add volume effect implementation here:
+    // android::VolumeSetVolumeLevel(pContext, (int16_t)(maxdB * 100));
+    LOG(DEBUG) << __func__ << " pandB: " << pandB << " maxdB " << maxdB;
+
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, "");
+
+    params.VC_Balance = pandB;
+
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, "");
+
+    mVolumeStereo = volume;
+    return RetCode::SUCCESS;
+}
+
+RetCode BundleContext::setEqualizerPreset(const int presetIdx) {
+    if (presetIdx < 0 || presetIdx >= lvm::MAX_NUM_PRESETS) {
+        return RetCode::ERROR_ILLEGAL_PARAMETER;
+    }
+
+    std::vector<Equalizer::BandLevel> bandLevels;
+    bandLevels.reserve(lvm::MAX_NUM_BANDS);
+    for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+        bandLevels.emplace_back(Equalizer::BandLevel{i, lvm::kSoftPresets[presetIdx][i]});
+    }
+
+    RetCode ret = updateControlParameter(bandLevels);
+    if (RetCode::SUCCESS == ret) {
+        mCurPresetIdx = presetIdx;
+        LOG(INFO) << __func__ << " success with " << presetIdx;
+    } else {
+        LOG(ERROR) << __func__ << " failed to setPreset " << presetIdx;
+    }
+    return ret;
+}
+
+RetCode BundleContext::setEqualizerBandLevels(const std::vector<Equalizer::BandLevel>& bandLevels) {
+    RETURN_VALUE_IF(bandLevels.size() > lvm::MAX_NUM_BANDS || bandLevels.empty(),
+                    RetCode::ERROR_ILLEGAL_PARAMETER, "sizeExceedMax");
+    RetCode ret = updateControlParameter(bandLevels);
+    if (RetCode::SUCCESS == ret) {
+        mCurPresetIdx = lvm::PRESET_CUSTOM;
+        LOG(INFO) << __func__ << " succeed with " << ::android::internal::ToString(bandLevels);
+    } else {
+        LOG(ERROR) << __func__ << " failed with " << ::android::internal::ToString(bandLevels);
+    }
+    return ret;
+}
+
+std::vector<Equalizer::BandLevel> BundleContext::getEqualizerBandLevels() const {
+    std::vector<Equalizer::BandLevel> bandLevels;
+    bandLevels.reserve(lvm::MAX_NUM_BANDS);
+    for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+        bandLevels.emplace_back(Equalizer::BandLevel{i, mBandGaindB[i]});
+    }
+    return bandLevels;
+}
+
+bool BundleContext::isBandLevelIndexInRange(
+        const std::vector<Equalizer::BandLevel>& bandLevels) const {
+    const auto [min, max] =
+            std::minmax_element(bandLevels.begin(), bandLevels.end(),
+                                [](const auto& a, const auto& b) { return a.index < b.index; });
+    return min->index >= 0 && max->index < lvm::MAX_NUM_BANDS;
+}
+
+RetCode BundleContext::updateControlParameter(const std::vector<Equalizer::BandLevel>& bandLevels) {
+    RETURN_VALUE_IF(!isBandLevelIndexInRange(bandLevels), RetCode::ERROR_ILLEGAL_PARAMETER,
+                    "indexOutOfRange");
+
+    // Start from the current gains so bands not present in bandLevels keep their values.
+    std::array<int, lvm::MAX_NUM_BANDS> tempLevel = mBandGaindB;
+    for (const auto& it : bandLevels) {
+        tempLevel[it.index] = it.levelMb;
+    }
+
+    LVM_ControlParams_t params;
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+    for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+        params.pEQNB_BandDefinition[i].Frequency = lvm::kPresetsFrequencies[i];
+        params.pEQNB_BandDefinition[i].QFactor = lvm::kPresetsQFactors[i];
+        params.pEQNB_BandDefinition[i].Gain = tempLevel[i];
+    }
+
+    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                    RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+    mBandGaindB = tempLevel;
+    LOG(INFO) << __func__ << " update bandGain to " << ::android::internal::ToString(mBandGaindB);
+
+    return RetCode::SUCCESS;
+}
+
+void BundleContext::initControlParameter(LVM_ControlParams_t& params) const {
+    /* General parameters */
+    params.OperatingMode = LVM_MODE_ON;
+    params.SampleRate = LVM_FS_44100;
+    params.SourceFormat = LVM_STEREO;
+    params.SpeakerType = LVM_HEADPHONES;
+
+    /* Concert Sound parameters */
+    params.VirtualizerOperatingMode = LVM_MODE_OFF;
+    params.VirtualizerType = LVM_CONCERTSOUND;
+    params.VirtualizerReverbLevel = 100;
+    params.CS_EffectLevel = LVM_CS_EFFECT_NONE;
+
+    params.EQNB_OperatingMode = LVM_EQNB_OFF;
+    params.EQNB_NBands = lvm::MAX_NUM_BANDS;
+    params.pEQNB_BandDefinition = getDefaultEqualizerBandDefs();
+
+    /* Volume Control parameters */
+    params.VC_EffectLevel = 0;
+    params.VC_Balance = 0;
+
+    /* Treble Enhancement parameters */
+    params.TE_OperatingMode = LVM_TE_OFF;
+    params.TE_EffectLevel = 0;
+
+    /* PSA Control parameters */
+    params.PSA_Enable = LVM_PSA_OFF;
+    params.PSA_PeakDecayRate = (LVM_PSA_DecaySpeed_en)0;
+
+    /* Bass Enhancement parameters */
+    params.BE_OperatingMode = LVM_BE_OFF;
+    params.BE_EffectLevel = 0;
+    params.BE_CentreFreq = LVM_BE_CENTRE_90Hz;
+    params.BE_HPF = LVM_BE_HPF_ON;
+
+    /* PSA Control parameters */
+    params.PSA_Enable = LVM_PSA_OFF;
+    params.PSA_PeakDecayRate = LVM_PSA_SPEED_MEDIUM;
+
+    /* TE Control parameters */
+    params.TE_OperatingMode = LVM_TE_OFF;
+    params.TE_EffectLevel = 0;
+
+    params.NrChannels = audio_channel_count_from_out_mask(AUDIO_CHANNEL_OUT_STEREO);
+    params.ChMask = AUDIO_CHANNEL_OUT_STEREO;
+    params.SourceFormat = LVM_STEREO;
+}
+
+void BundleContext::initHeadroomParameter(LVM_HeadroomParams_t& params) const {
+    params.pHeadroomDefinition = getDefaultEqualizerHeadroomBanDefs();
+    params.NHeadroomBands = 2;
+    params.Headroom_OperatingMode = LVM_HEADROOM_OFF;
+}
+
+LVM_EQNB_BandDef_t *BundleContext::getDefaultEqualizerBandDefs() {
+    static LVM_EQNB_BandDef_t* BandDefs = []() {
+        static LVM_EQNB_BandDef_t tempDefs[lvm::MAX_NUM_BANDS];
+        /* N-Band Equaliser parameters */
+        for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+            tempDefs[i].Frequency = lvm::kPresetsFrequencies[i];
+            tempDefs[i].QFactor = lvm::kPresetsQFactors[i];
+            tempDefs[i].Gain = lvm::kSoftPresets[0/* normal */][i];
+        }
+        return tempDefs;
+    }();
+
+    return BandDefs;
+}
+
+LVM_HeadroomBandDef_t *BundleContext::getDefaultEqualizerHeadroomBanDefs() {
+    static LVM_HeadroomBandDef_t HeadroomBandDef[LVM_HEADROOM_MAX_NBANDS] = {
+            {
+                    .Limit_Low = 20,
+                    .Limit_High = 4999,
+                    .Headroom_Offset = 0,
+            },
+            {
+                    .Limit_Low = 5000,
+                    .Limit_High = 24000,
+                    .Headroom_Offset = 0,
+            },
+    };
+    return HeadroomBandDef;
+}
+
+}  // namespace aidl::android::hardware::audio::effect
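
A short usage sketch for the band-level path above (`context` is an assumed, already-initialized BundleContext of the EQUALIZER type):

    std::vector<Equalizer::BandLevel> levels;
    for (int i = 0; i < static_cast<int>(lvm::MAX_NUM_BANDS); i++) {
        levels.push_back(Equalizer::BandLevel{.index = i, .levelMb = 0});  // start flat
    }
    levels[0].levelMb = 3;  // boost the lowest band
    if (context->setEqualizerBandLevels(levels) == RetCode::SUCCESS) {
        // getEqualizerPreset() now reports lvm::PRESET_CUSTOM.
    }
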
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
new file mode 100644
index 0000000..616ab78
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/logging.h>
+#include <array>
+
+#include "BundleTypes.h"
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class BundleContext final : public EffectContext {
+  public:
+    BundleContext(int statusDepth, const Parameter::Common& common,
+                  const lvm::BundleEffectType& type)
+        : EffectContext(statusDepth, common), mType(type) {
+        LOG(DEBUG) << __func__ << type;
+    }
+    ~BundleContext() override {
+        LOG(DEBUG) << __func__;
+        deInit();
+    }
+
+    RetCode init();
+    void deInit();
+    lvm::BundleEffectType getBundleType() const { return mType; }
+
+    RetCode enable();
+    RetCode disable();
+
+    LVM_Handle_t getLvmInstance() const { return mInstance; }
+
+    void setSampleRate(const int sampleRate) { mSampleRate = sampleRate; }
+    int getSampleRate() const { return mSampleRate; }
+
+    void setChannelMask(const aidl::android::media::audio::common::AudioChannelLayout& chMask) {
+        mChMask = chMask;
+    }
+    aidl::android::media::audio::common::AudioChannelLayout getChannelMask() const {
+        return mChMask;
+    }
+
+    RetCode setEqualizerPreset(const int presetIdx);
+    int getEqualizerPreset() const { return mCurPresetIdx; }
+    RetCode setEqualizerBandLevels(const std::vector<Equalizer::BandLevel>& bandLevels);
+    std::vector<Equalizer::BandLevel> getEqualizerBandLevels() const;
+
+    RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override;
+    Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; };
+
+  private:
+    const lvm::BundleEffectType mType;
+    bool mEnabled = false;
+    LVM_Handle_t mInstance = nullptr;
+
+    aidl::android::media::audio::common::AudioDeviceDescription mVirtualizerForcedDevice;
+    aidl::android::media::audio::common::AudioChannelLayout mChMask;
+
+    int mSampleRate = LVM_FS_44100;
+    int mSamplesPerSecond = 0;
+    int mSamplesToExitCountEq = 0;
+    int mSamplesToExitCountBb = 0;
+    int mSamplesToExitCountVirt = 0;
+    int mFrameCount = 0;
+
+    /* Bitmask whether drain is in progress due to disabling the effect.
+       The corresponding bit to an effect is set by 1 << lvm_effect_en. */
+    int mEffectInDrain = 0;
+
+    /* Bitmask whether process() was called for a particular effect.
+       The corresponding bit to an effect is set by 1 << lvm_effect_en. */
+    int mEffectProcessCalled = 0;
+    int mNumberEffectsEnabled = 0;
+    int mNumberEffectsCalled = 0;
+    bool mFirstVolume = false;
+    // Bass
+    bool mBassTempDisabled = false;
+    int mBassStrengthSaved = 0;
+    // Equalizer
+    int mCurPresetIdx = lvm::PRESET_CUSTOM; /* Current preset being used */
+    std::array<int, lvm::MAX_NUM_BANDS> mBandGaindB;
+    // Virtualizer
+    int mVirtStrengthSaved = 0; /* Conversion between Get/Set */
+    bool mVirtualizerTempDisabled = false;
+    // Volume
+    int mLevelSaved = 0; /* for when mute is set, level must be saved */
+    bool mMuteEnabled = false; /* Must store as mute = -96dB level */
+
+    void initControlParameter(LVM_ControlParams_t& params) const;
+    void initHeadroomParameter(LVM_HeadroomParams_t& params) const;
+    int16_t VolToDb(uint32_t vol) const;
+    LVM_INT16 LVC_ToDB_s32Tos16(LVM_INT32 Lin_fix) const;
+    RetCode updateControlParameter(const std::vector<Equalizer::BandLevel>& bandLevels);
+    bool isBandLevelIndexInRange(const std::vector<Equalizer::BandLevel>& bandLevels) const;
+    static LVM_EQNB_BandDef_t* getDefaultEqualizerBandDefs();
+    static LVM_HeadroomBandDef_t* getDefaultEqualizerHeadroomBanDefs();
+};
+
+}  // namespace aidl::android::hardware::audio::effect
+
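
Lifecycle sketch for the context class declared above (`common` is an assumed, already-populated Parameter::Common):

    auto context = std::make_shared<BundleContext>(1 /* statusDepth */, common,
                                                   lvm::BundleEffectType::EQUALIZER);
    if (context->init() == RetCode::SUCCESS) {
        context->setEqualizerPreset(0 /* Normal */);
        context->enable();
        // ... run audio through LVM_Process(context->getLvmInstance(), ...) ...
        context->disable();
    }
    // deInit() runs from the destructor once the last shared_ptr reference is dropped.
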
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
new file mode 100644
index 0000000..1772bd1
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include <array>
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+#include "effect-impl/EffectUUID.h"
+#include "effect-impl/EffectTypes.h"
+#include "LVM.h"
+
+namespace aidl::android::hardware::audio::effect {
+namespace lvm {
+
+constexpr inline size_t MAX_NUM_PRESETS = 10;
+constexpr inline size_t MAX_NUM_BANDS = 5;
+constexpr inline size_t MAX_CALL_SIZE = 256;
+constexpr inline int BASS_BOOST_CUP_LOAD_ARM9E = 150;   // Expressed in 0.1 MIPS
+constexpr inline int VIRTUALIZER_CUP_LOAD_ARM9E = 120;  // Expressed in 0.1 MIPS
+constexpr inline int EQUALIZER_CUP_LOAD_ARM9E = 220;    // Expressed in 0.1 MIPS
+constexpr inline int VOLUME_CUP_LOAD_ARM9E = 0;         // Expressed in 0.1 MIPS
+constexpr inline int BUNDLE_MEM_USAGE = 25;             // Expressed in kB
+constexpr inline int PRESET_CUSTOM = -1;
+
+static const std::vector<Equalizer::BandFrequency> kEqBandFrequency = {{0, 30000, 120000},
+                                                                       {1, 120001, 460000},
+                                                                       {2, 460001, 1800000},
+                                                                       {3, 1800001, 7000000},
+                                                                       {4, 7000001, 20000000}};
+
+/*
+Frequencies in Hz
+Note: If these frequencies change, please update LimitLevel values accordingly.
+*/
+constexpr inline std::array<uint16_t, MAX_NUM_BANDS> kPresetsFrequencies = {60, 230, 910, 3600,
+                                                                            14000};
+
+/* Q factor multiplied by 100 */
+constexpr inline std::array<uint16_t, MAX_NUM_BANDS> kPresetsQFactors = {96, 96, 96, 96, 96};
+
+constexpr inline std::array<std::array<int16_t, MAX_NUM_BANDS>, MAX_NUM_PRESETS> kSoftPresets = {
+        {{3, 0, 0, 0, 3},    /* Normal Preset */
+         {5, 3, -2, 4, 4},   /* Classical Preset */
+         {6, 0, 2, 4, 1},    /* Dance Preset */
+         {0, 0, 0, 0, 0},    /* Flat Preset */
+         {3, 0, 0, 2, -1},   /* Folk Preset */
+         {4, 1, 9, 3, 0},    /* Heavy Metal Preset */
+         {5, 3, 0, 1, 3},    /* Hip Hop Preset */
+         {4, 2, -2, 2, 5},   /* Jazz Preset */
+         {-1, 2, 5, 1, -2},  /* Pop Preset */
+         {5, 3, -1, 3, 5}}}; /* Rock Preset */
+
+static const std::vector<Equalizer::Preset> kEqPresets = {
+        {0, "Normal"},      {1, "Classical"}, {2, "Dance"}, {3, "Flat"}, {4, "Folk"},
+        {5, "Heavy Metal"}, {6, "Hip Hop"},   {7, "Jazz"},  {8, "Pop"},  {9, "Rock"}};
+
+static const Equalizer::Capability kEqCap = {.bandFrequencies = kEqBandFrequency,
+                                             .presets = kEqPresets};
+
+static const Descriptor kEqualizerDesc = {
+        .common = {.id = {.type = EqualizerTypeUUID,
+                          .uuid = EqualizerBundleImplUUID,
+                          .proxy = std::nullopt},
+                   .flags = {.type = Flags::Type::INSERT,
+                             .insert = Flags::Insert::FIRST,
+                             .volume = Flags::Volume::CTRL},
+                   .name = "EqualizerBundle",
+                   .implementor = "NXP Software Ltd."},
+        .capability = Capability::make<Capability::equalizer>(kEqCap)};
+
+// TODO: add descriptors for other bundle effect types here.
+static const Descriptor kVirtualizerDesc;
+static const Descriptor kBassBoostDesc;
+static const Descriptor kVolumeDesc;
+
+/* The following tables have been computed using the actual levels measured by the output of
+ * white noise or pink noise (IEC268-1) for the EQ and BassBoost Effects. These are estimates of
+ * the actual energy that 'could' be present in the given band.
+ * If the frequency values in EQNB_5BandPresetsFrequencies change, these values might need to be
+ * updated.
+ */
+constexpr inline std::array<float, MAX_NUM_BANDS> kBandEnergyCoefficient = {7.56, 9.69, 9.59, 7.37,
+                                                                            2.88};
+
+constexpr inline std::array<float, MAX_NUM_BANDS - 1> kBandEnergyCrossCoefficient = {126.0, 115.0,
+                                                                                     125.0, 104.0};
+
+constexpr inline std::array<float, MAX_NUM_BANDS> kBassBoostEnergyCrossCoefficient = {
+        221.21, 208.10, 28.16, 0.0, 0.0};
+
+constexpr inline float kBassBoostEnergyCoefficient = 9.00;
+
+constexpr inline float kVirtualizerContribution = 1.9;
+
+enum class BundleEffectType {
+    BASS_BOOST,
+    VIRTUALIZER,
+    EQUALIZER,
+    VOLUME,
+};
+
+inline std::ostream& operator<<(std::ostream& out, const BundleEffectType& type) {
+    switch (type) {
+        case BundleEffectType::BASS_BOOST:
+            return out << "BASS_BOOST";
+        case BundleEffectType::VIRTUALIZER:
+            return out << "VIRTUALIZER";
+        case BundleEffectType::EQUALIZER:
+            return out << "EQUALIZER";
+        case BundleEffectType::VOLUME:
+            return out << "VOLUME";
+    }
+    return out << "EnumBundleEffectTypeError";
+}
+
+inline std::ostream& operator<<(std::ostream& out, const LVM_ReturnStatus_en& status) {
+    switch (status) {
+        case LVM_SUCCESS:
+            return out << "LVM_SUCCESS";
+        case LVM_ALIGNMENTERROR:
+            return out << "LVM_ALIGNMENTERROR";
+        case LVM_NULLADDRESS:
+            return out << "LVM_NULLADDRESS";
+        case LVM_OUTOFRANGE:
+            return out << "LVM_OUTOFRANGE";
+        case LVM_INVALIDNUMSAMPLES:
+            return out << "LVM_INVALIDNUMSAMPLES";
+        case LVM_WRONGAUDIOTIME:
+            return out << "LVM_WRONGAUDIOTIME";
+        case LVM_ALGORITHMDISABLED:
+            return out << "LVM_ALGORITHMDISABLED";
+        case LVM_ALGORITHMPSA:
+            return out << "LVM_ALGORITHMPSA";
+        case LVM_RETURNSTATUS_DUMMY:
+            return out << "LVM_RETURNSTATUS_DUMMY";
+    }
+    return out << "EnumLvmRetStatusError";
+}
+
+#define GOTO_IF_LVM_ERROR(status, tag, log)                                       \
+    do {                                                                          \
+        LVM_ReturnStatus_en temp = (status);                                      \
+        if (temp != LVM_SUCCESS) {                                                \
+            LOG(ERROR) << __func__ << " return status: " << temp << " " << (log); \
+            goto tag;                                                             \
+        }                                                                         \
+    } while (0)
+
+}  // namespace lvm
+}  // namespace aidl::android::hardware::audio::effect
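
The constexpr tables above are enough to rebuild LVM band definitions for any soft preset; a small sketch inside the same lvm namespace (helper name hypothetical):

    std::array<LVM_EQNB_BandDef_t, MAX_NUM_BANDS> bandDefsForPreset(size_t presetIdx) {
        // Caller is expected to pass presetIdx < MAX_NUM_PRESETS.
        std::array<LVM_EQNB_BandDef_t, MAX_NUM_BANDS> defs{};
        for (size_t i = 0; i < MAX_NUM_BANDS; i++) {
            defs[i].Frequency = kPresetsFrequencies[i];  // Hz
            defs[i].QFactor = kPresetsQFactors[i];       // Q factor x100
            defs[i].Gain = kSoftPresets[presetIdx][i];   // per-band gain
        }
        return defs;
    }
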
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
new file mode 100644
index 0000000..569356f
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectBundleAidl"
+#include <Utils.h>
+#include <algorithm>
+#include <unordered_set>
+
+#include <android-base/logging.h>
+#include <fmq/AidlMessageQueue.h>
+#include <audio_effects/effect_bassboost.h>
+#include <audio_effects/effect_equalizer.h>
+#include <audio_effects/effect_virtualizer.h>
+
+#include "EffectBundleAidl.h"
+#include <LVM.h>
+#include <limits.h>
+
+using aidl::android::hardware::audio::effect::EffectBundleAidl;
+using aidl::android::hardware::audio::effect::EqualizerBundleImplUUID;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* uuid,
+                                           std::shared_ptr<IEffect>* instanceSpp) {
+    if (uuid == nullptr || *uuid != EqualizerBundleImplUUID) {
+        LOG(ERROR) << __func__ << "uuid not supported";
+        return EX_ILLEGAL_ARGUMENT;
+    }
+    if (instanceSpp) {
+        *instanceSpp = ndk::SharedRefBase::make<EffectBundleAidl>(*uuid);
+        LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+        return EX_NONE;
+    } else {
+        LOG(ERROR) << __func__ << " invalid input parameter!";
+        return EX_ILLEGAL_ARGUMENT;
+    }
+}
+
+extern "C" binder_exception_t destroyEffect(const std::shared_ptr<IEffect>& instanceSp) {
+    if (!instanceSp) {
+        LOG(ERROR) << __func__ << "nullInstance";
+        return EX_ILLEGAL_ARGUMENT;
+    }
+    State state;
+    ndk::ScopedAStatus status = instanceSp->getState(&state);
+    if (!status.isOk() || State::INIT != state) {
+        LOG(ERROR) << __func__ << " instance " << instanceSp.get()
+                   << " in state: " << toString(state) << ", status: " << status.getDescription();
+        return EX_ILLEGAL_STATE;
+    }
+    LOG(DEBUG) << __func__ << " instance " << instanceSp.get() << " destroyed";
+    return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+EffectBundleAidl::EffectBundleAidl(const AudioUuid& uuid) {
+    LOG(DEBUG) << __func__ << uuid.toString();
+    if (uuid == EqualizerBundleImplUUID) {
+        mType = lvm::BundleEffectType::EQUALIZER;
+        mDescriptor = &lvm::kEqualizerDesc;
+    } else {
+        // TODO: add other bundle effect types here.
+        LOG(ERROR) << __func__ << uuid.toString() << " not supported yet!";
+    }
+}
+
+EffectBundleAidl::~EffectBundleAidl() {
+    releaseContext();
+    LOG(DEBUG) << __func__;
+}
+
+ndk::ScopedAStatus EffectBundleAidl::getDescriptor(Descriptor* _aidl_return) {
+    RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+    LOG(DEBUG) << _aidl_return->toString();
+    *_aidl_return = *mDescriptor;
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterCommon(const Parameter& param) {
+    std::lock_guard lg(mMutex);
+    RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+    auto tag = param.getTag();
+    switch (tag) {
+        case Parameter::common:
+            RETURN_IF(mContext->setCommon(param.get<Parameter::common>()) != RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setCommFailed");
+            break;
+        case Parameter::deviceDescription:
+            RETURN_IF(mContext->setOutputDevice(param.get<Parameter::deviceDescription>()) !=
+                              RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setDeviceFailed");
+            break;
+        case Parameter::mode:
+            RETURN_IF(mContext->setAudioMode(param.get<Parameter::mode>()) != RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setModeFailed");
+            break;
+        case Parameter::source:
+            RETURN_IF(mContext->setAudioSource(param.get<Parameter::source>()) != RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setSourceFailed");
+            break;
+        case Parameter::volumeStereo:
+            RETURN_IF(mContext->setVolumeStereo(param.get<Parameter::volumeStereo>()) !=
+                              RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setVolumeStereoFailed");
+            break;
+        default: {
+            LOG(ERROR) << __func__ << " unsupportedParameterTag " << toString(tag);
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+                                                                    "commonParamNotSupported");
+        }
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterSpecific(const Parameter::Specific& specific) {
+    LOG(DEBUG) << __func__ << " specific " << specific.toString();
+    auto tag = specific.getTag();
+    RETURN_IF(tag != Parameter::Specific::equalizer, EX_ILLEGAL_ARGUMENT,
+              "specificParamNotSupported");
+    RETURN_IF(mContext == nullptr, EX_NULL_POINTER, "nullContext");
+
+    auto& eq = specific.get<Parameter::Specific::equalizer>();
+    auto eqTag = eq.getTag();
+    switch (eqTag) {
+        case Equalizer::preset:
+            RETURN_IF(mContext->setEqualizerPreset(eq.get<Equalizer::preset>()) != RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setBandLevelsFailed");
+            break;
+        case Equalizer::bandLevels:
+            RETURN_IF(mContext->setEqualizerBandLevels(eq.get<Equalizer::bandLevels>()) !=
+                              RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setBandLevelsFailed");
+            break;
+        default:
+            LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+                                                                    "eqTagNotSupported");
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::getParameterSpecific(const Parameter::Id& id,
+                                                          Parameter::Specific* specific) {
+    RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+    auto tag = id.getTag();
+    RETURN_IF(Parameter::Id::equalizerTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+    auto eqId = id.get<Parameter::Id::equalizerTag>();
+    auto eqIdTag = eqId.getTag();
+    switch (eqIdTag) {
+        case Equalizer::Id::commonTag:
+            return getParameterEqualizer(eqId.get<Equalizer::Id::commonTag>(), specific);
+        default:
+            LOG(ERROR) << __func__ << " tag " << toString(eqIdTag) << " not supported";
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+                                                                    "EqualizerTagNotSupported");
+    }
+}
+
+ndk::ScopedAStatus EffectBundleAidl::getParameterEqualizer(const Equalizer::Tag& tag,
+                                                           Parameter::Specific* specific) {
+    std::lock_guard lg(mMutex);
+    RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+    Equalizer eqParam;
+    switch (tag) {
+        case Equalizer::bandLevels: {
+            eqParam.set<Equalizer::bandLevels>(mContext->getEqualizerBandLevels());
+            break;
+        }
+        case Equalizer::preset: {
+            eqParam.set<Equalizer::preset>(mContext->getEqualizerPreset());
+            break;
+        }
+        default: {
+            LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+                    EX_ILLEGAL_ARGUMENT, "unsupportedTag");
+        }
+    }
+
+    specific->set<Parameter::Specific::equalizer>(eqParam);
+    return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> EffectBundleAidl::createContext(const Parameter::Common& common) {
+    if (mContext) {
+        LOG(DEBUG) << __func__ << " context already exist";
+        return mContext;
+    }
+
+    // GlobalSession is a singleton
+    mContext =
+            GlobalSession::getGlobalSession().createSession(mType, 1 /* statusFmqDepth */, common);
+    return mContext;
+}
+
+RetCode EffectBundleAidl::releaseContext() {
+    if (mContext) {
+        GlobalSession::getGlobalSession().releaseSession(mType, mContext->getSessionId());
+        mContext.reset();
+    }
+    return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status EffectBundleAidl::effectProcessImpl(float* in, float* out, int sampleToProcess) {
+    LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << sampleToProcess;
+    if (!mContext) {
+        LOG(ERROR) << __func__ << " nullContext";
+        return {EX_NULL_POINTER, 0, 0};
+    }
+
+    auto frameSize = mContext->getInputFrameSize();
+    if (0 == frameSize) {
+        LOG(ERROR) << __func__ << " frameSizeIs0";
+        return {EX_ILLEGAL_ARGUMENT, 0, 0};
+    }
+
+    LOG(DEBUG) << __func__ << " start processing";
+    LVM_UINT16 frames = sampleToProcess * sizeof(float) / frameSize;
+    LVM_ReturnStatus_en lvmStatus = LVM_Process(mContext->getLvmInstance(), in, out, frames, 0);
+    if (lvmStatus != LVM_SUCCESS) {
+        LOG(ERROR) << __func__ << lvmStatus;
+        return {EX_UNSUPPORTED_OPERATION, 0, 0};
+    }
+    LOG(DEBUG) << __func__ << " done processing";
+    return {STATUS_OK, sampleToProcess, sampleToProcess};
+}
+
+}  // namespace aidl::android::hardware::audio::effect
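
From a client's point of view the only path wired up so far is the equalizer; a sketch of setting a preset through the AIDL union types handled above (`effect` is an assumed std::shared_ptr<IEffect> obtained via createEffect; union tag and method names are taken from the audio effect AIDL as an assumption):

    Equalizer eq = Equalizer::make<Equalizer::preset>(3 /* Flat, see kEqPresets */);
    Parameter::Specific specific =
            Parameter::Specific::make<Parameter::Specific::equalizer>(eq);
    Parameter param = Parameter::make<Parameter::specific>(specific);
    ndk::ScopedAStatus status = effect->setParameter(param);
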
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
new file mode 100644
index 0000000..5fdf301
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include <functional>
+#include <map>
+#include <memory>
+#include <mutex>
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+#include <android-base/logging.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+
+#include "BundleContext.h"
+#include "BundleTypes.h"
+#include "GlobalSession.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class EffectBundleAidl final : public EffectImpl {
+  public:
+    explicit EffectBundleAidl(const AudioUuid& uuid);
+    ~EffectBundleAidl() override;
+
+    ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+    ndk::ScopedAStatus setParameterCommon(const Parameter& param) override;
+    ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+    ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+                                            Parameter::Specific* specific) override;
+    IEffect::Status effectProcessImpl(float *in, float *out, int process) override;
+
+    std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+    RetCode releaseContext() override;
+
+    ndk::ScopedAStatus commandStart() override {
+        mContext->enable();
+        return ndk::ScopedAStatus::ok();
+    }
+    ndk::ScopedAStatus commandStop() override {
+        mContext->disable();
+        return ndk::ScopedAStatus::ok();
+    }
+    ndk::ScopedAStatus commandReset() override {
+        mContext->disable();
+        return ndk::ScopedAStatus::ok();
+    }
+
+  private:
+    const Descriptor* mDescriptor;
+    lvm::BundleEffectType mType = lvm::BundleEffectType::EQUALIZER;
+    std::shared_ptr<BundleContext> mContext;
+
+    IEffect::Status status(binder_status_t status, size_t consumed, size_t produced);
+    ndk::ScopedAStatus getParameterEqualizer(const Equalizer::Tag& tag,
+                                             Parameter::Specific* specific);
+};
+
+}  // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h b/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h
new file mode 100644
index 0000000..9226274
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <memory>
+#include <unordered_map>
+
+#include <android-base/logging.h>
+
+#include "BundleContext.h"
+#include "BundleTypes.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+/**
+ * @brief Maintain all effect bundle sessions.
+ *
+ * Sessions are identified with the session ID, maximum of MAX_BUNDLE_SESSIONS is supported by the
+ * bundle implementation.
+ */
+class GlobalSession {
+  public:
+    static GlobalSession& getGlobalSession() {
+        static GlobalSession instance;
+        return instance;
+    }
+
+    bool isSessionIdExist(int sessionId) const { return mSessionMap.count(sessionId); }
+
+    static bool findBundleTypeInList(std::vector<std::shared_ptr<BundleContext>>& list,
+                                     const lvm::BundleEffectType& type, bool remove = false) {
+        auto itor = std::find_if(list.begin(), list.end(),
+                                  [type](const std::shared_ptr<BundleContext>& bundle) {
+                                      return bundle ? bundle->getBundleType() == type : false;
+                                  });
+        if (itor == list.end()) {
+            return false;
+        }
+        if (remove && *itor) {
+            (*itor)->deInit();
+            list.erase(itor);
+        }
+        return true;
+    }
+
+    /**
+     * Create a BundleContext of a certain type in a shared_ptr container; each session must not
+     * have more than one context of each type.
+     */
+    std::shared_ptr<BundleContext> createSession(const lvm::BundleEffectType& type, int statusDepth,
+                                                 const Parameter::Common& common) {
+        int sessionId = common.session;
+        LOG(DEBUG) << __func__ << type << " with sessionId " << sessionId;
+        std::lock_guard lg(mMutex);
+        if (mSessionMap.count(sessionId) == 0 && mSessionMap.size() >= MAX_BUNDLE_SESSIONS) {
+            LOG(ERROR) << __func__ << " exceed max bundle session";
+            return nullptr;
+        }
+
+        if (mSessionMap.count(sessionId)) {
+            if (findBundleTypeInList(mSessionMap[sessionId], type)) {
+                LOG(ERROR) << __func__ << type << " already exist in session " << sessionId;
+                return nullptr;
+            }
+        }
+
+        auto& list = mSessionMap[sessionId];
+        auto context = std::make_shared<BundleContext>(statusDepth, common, type);
+        RETURN_VALUE_IF(!context, nullptr, "failedToCreateContext");
+
+        RetCode ret = context->init();
+        if (RetCode::SUCCESS != ret) {
+            LOG(ERROR) << __func__ << " context init ret " << ret;
+            return nullptr;
+        }
+        list.push_back(context);
+        return context;
+    }
+
+    void releaseSession(const lvm::BundleEffectType& type, int sessionId) {
+        LOG(DEBUG) << __func__ << type << " sessionId " << sessionId;
+        std::lock_guard lg(mMutex);
+        if (mSessionMap.count(sessionId)) {
+            auto& list = mSessionMap[sessionId];
+            if (!findBundleTypeInList(list, type, true /* remove */)) {
+                LOG(ERROR) << __func__ << " can't find " << type << "in session " << sessionId;
+                return;
+            }
+            if (list.size() == 0) {
+                mSessionMap.erase(sessionId);
+            }
+        }
+    }
+
+  private:
+    // Lock for mSessionMap access.
+    std::mutex mMutex;
+    // Max session number supported.
+    static constexpr int MAX_BUNDLE_SESSIONS = 32;
+    std::unordered_map<int /* session ID */, std::vector<std::shared_ptr<BundleContext>>>
+            mSessionMap GUARDED_BY(mMutex);
+};
+}  // namespace aidl::android::hardware::audio::effect
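
Putting the singleton to use (this mirrors what EffectBundleAidl::createContext/releaseContext do above; `common` is an assumed Parameter::Common):

    auto& sessions = GlobalSession::getGlobalSession();
    std::shared_ptr<BundleContext> context = sessions.createSession(
            lvm::BundleEffectType::EQUALIZER, 1 /* statusDepth */, common);
    if (context) {
        // ... use the context ...
        sessions.releaseSession(lvm::BundleEffectType::EQUALIZER, context->getSessionId());
    }
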
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index 1287514..a32188a 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -100,3 +100,29 @@
         integer_overflow: true,
     },
 }
+
+cc_library_shared {
+    name: "libbundleaidl",
+    srcs: [
+        "Aidl/BundleContext.cpp",
+        "Aidl/EffectBundleAidl.cpp",
+        ":effectCommonFile",
+    ],
+    static_libs: ["libmusicbundle"],
+    defaults: [
+        "aidlaudioservice_defaults",
+        "latest_android_hardware_audio_effect_ndk_shared",
+        "latest_android_media_audio_common_types_ndk_shared",
+    ],
+    local_include_dirs: ["Aidl"],
+    header_libs: [
+        "libaudioeffects",
+        "libhardware_headers",
+    ],
+    shared_libs: [
+        "liblog",
+    ],
+    visibility: [
+        "//hardware/interfaces/audio/aidl/default",
+    ],
+}
\ No newline at end of file
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
index bd8af04..3dee40a 100644
--- a/media/libheadtracking/SensorPoseProvider.cpp
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -122,6 +122,7 @@
     ~SensorPoseProviderImpl() override {
         // Disable all active sensors.
         mEnabledSensors.clear();
+        mQuit = true;
         mLooper->wake();
         mThread.join();
     }
@@ -217,6 +218,7 @@
         std::optional<int32_t> discontinuityCount;
     };
 
+    bool mQuit = false;
     sp<Looper> mLooper;
     Listener* const mListener;
     SensorManager* const mSensorManager;
@@ -260,13 +262,14 @@
 
         initFinished(true);
 
-        while (true) {
+        while (!mQuit) {
             int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr, nullptr, nullptr);
 
             switch (ret) {
                 case ALOOPER_POLL_WAKE:
-                    // Normal way to exit.
-                    return;
+                    // Continue to see if mQuit flag is set.
+                    // This can be spurious (due to bugreport being taken).
+                    continue;
 
                 case kIdent:
                     // Possible events on our queue.
@@ -285,7 +288,8 @@
             ssize_t size = mQueue->filterEvents(&event, actual);
 
             if (size < 0 || size > 1) {
-                ALOGE("Unexpected return value from SensorEventQueue::filterEvents: %zd", size);
+                ALOGE("%s: Unexpected return value from SensorEventQueue::filterEvents: %zd",
+                        __func__, size);
                 break;
             }
             if (size == 0) {
@@ -295,6 +299,7 @@
 
             handleEvent(event);
         }
+        ALOGD("%s: Exiting sensor event loop", __func__);
     }
 
     void handleEvent(const ASensorEvent& event) {
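
The shutdown pattern adopted above, reduced to its essentials: set a quit flag, wake the Looper, and treat ALOOPER_POLL_WAKE as "re-check the flag" so a spurious wake (e.g. while a bugreport is taken) no longer kills the thread. This sketch uses std::atomic<bool> for the flag, which is stricter than the plain bool in the patch:

    #include <atomic>
    #include <thread>
    #include <utils/Looper.h>

    struct PollWorker {
        android::sp<android::Looper> looper = new android::Looper(false /* allowNonCallbacks */);
        std::atomic<bool> quit{false};
        std::thread thread{[this] { loop(); }};

        ~PollWorker() {
            quit = true;
            looper->wake();   // unblock pollOnce()
            thread.join();
        }

        void loop() {
            while (!quit) {
                if (looper->pollOnce(-1 /* no timeout */) == ALOOPER_POLL_WAKE) {
                    continue;  // possibly spurious; the loop condition decides whether to exit
                }
                // ... drain and handle queued events ...
            }
        }
    };
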
diff --git a/media/libmedia/xsd/api/current.txt b/media/libmedia/xsd/api/current.txt
index 73b5f8d..35aa213 100644
--- a/media/libmedia/xsd/api/current.txt
+++ b/media/libmedia/xsd/api/current.txt
@@ -47,7 +47,9 @@
     method public java.util.List<media.profiles.EncoderProfile> getEncoderProfile_optional();
     method public java.util.List<media.profiles.CamcorderProfiles.ImageDecodingOptional> getImageDecoding_optional();
     method public java.util.List<media.profiles.CamcorderProfiles.ImageEncodingOptional> getImageEncoding_optional();
+    method public int getStartOffsetMs();
     method public void setCameraId(int);
+    method public void setStartOffsetMs(int);
   }
 
   public static class CamcorderProfiles.ImageDecodingOptional {
diff --git a/media/libmedia/xsd/media_profiles.xsd b/media/libmedia/xsd/media_profiles.xsd
index 9664456..dcc3028 100644
--- a/media/libmedia/xsd/media_profiles.xsd
+++ b/media/libmedia/xsd/media_profiles.xsd
@@ -49,6 +49,7 @@
             </xs:element>
         </xs:choice>
         <xs:attribute name="cameraId" type="xs:int"/>
+        <xs:attribute name="startOffsetMs" type="xs:int"/>
     </xs:complexType>
     <xs:complexType name="EncoderProfile">
         <xs:sequence>
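
A hypothetical media_profiles.xml fragment exercising the new optional attribute (values illustrative only):

    <CamcorderProfiles cameraId="0" startOffsetMs="300">
        <!-- EncoderProfile / ImageEncoding / ImageDecoding entries as before -->
    </CamcorderProfiles>
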
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index f3cbdb0..bdf1cbc 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -2699,7 +2699,7 @@
     // This is a benign busy-wait, with the next data request generated 10 ms or more later;
     // nevertheless for power reasons, we don't want to see too many of these.
 
-    ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
+    ALOGV_IF(actualSize == 0 && buffer.size() > 0, "callbackwrapper: empty buffer returned");
     unlock();
     return actualSize;
 }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index a0bc8ca..6497b58 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -129,6 +129,7 @@
       mRTPCVOExtMap(-1),
       mRTPCVODegrees(0),
       mRTPSockDscp(0),
+      mRTPSockOptEcn(0),
       mRTPSockNetwork(0),
       mLastSeqNo(0),
       mStarted(false),
@@ -910,6 +911,13 @@
     return OK;
 }
 
+status_t StagefrightRecorder::setParamRtpEcn(int32_t ecn) {
+    ALOGV("setParamRtpEcn: %d", ecn);
+
+    mRTPSockOptEcn = ecn;
+    return OK;
+}
+
 status_t StagefrightRecorder::requestIDRFrame() {
     status_t ret = BAD_VALUE;
     if (mVideoEncoderSource != NULL) {
@@ -1091,6 +1099,11 @@
         if (safe_strtoi32(value.string(), &dscp)) {
             return setParamRtpDscp(dscp);
         }
+    } else if (key == "rtp-param-set-socket-ecn") {
+        int32_t targetEcn;
+        if (safe_strtoi32(value.string(), &targetEcn)) {
+            return setParamRtpEcn(targetEcn);
+        }
     } else if (key == "rtp-param-set-socket-network") {
         int64_t networkHandle;
         if (safe_strtoi64(value.string(), &networkHandle)) {
@@ -1272,6 +1285,9 @@
             if (mRTPSockDscp > 0) {
                 meta->setInt32(kKeyRtpDscp, mRTPSockDscp);
             }
+            if (mRTPSockOptEcn > 0) {
+                meta->setInt32(kKeyRtpEcn, mRTPSockOptEcn);
+            }
 
             status = mWriter->start(meta.get());
             break;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index d7785da..0801101 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -153,6 +153,7 @@
     int32_t mRTPCVOExtMap;
     int32_t mRTPCVODegrees;
     int32_t mRTPSockDscp;
+    int32_t mRTPSockOptEcn;
     int64_t mRTPSockNetwork;
     uint32_t mLastSeqNo;
 
@@ -247,6 +248,7 @@
     status_t setRTPCVOExtMap(int32_t extmap);
     status_t setRTPCVODegrees(int32_t cvoDegrees);
     status_t setParamRtpDscp(int32_t dscp);
+    status_t setParamRtpEcn(int32_t ecn);
     status_t setSocketNetwork(int64_t networkHandle);
     status_t requestIDRFrame();
     void clipVideoBitRate();
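Caller-side, the new ECN knob is exercised the same way as the existing DSCP one: a "key=value" pair pushed through MediaRecorder::setParameters(), which lands in StagefrightRecorder::setParameter() above. A hedged sketch (the value 1 is only an example; the recorder just checks that mRTPSockOptEcn is non-zero before forwarding it as kKeyRtpEcn):

```cpp
#include <media/mediarecorder.h>
#include <utils/String8.h>

// Sketch only: assumes `recorder` is already configured for RTP output,
// the same way the existing DSCP parameter is used today.
void enableRtpSocketOptions(const android::sp<android::MediaRecorder>& recorder) {
    using android::String8;
    recorder->setParameters(String8("rtp-param-set-socket-dscp=34"));  // existing knob
    recorder->setParameters(String8("rtp-param-set-socket-ecn=1"));    // knob added here
}
```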
diff --git a/media/libmediaplayerservice/fuzzer/Android.bp b/media/libmediaplayerservice/fuzzer/Android.bp
index a36f1d6..5abac81 100644
--- a/media/libmediaplayerservice/fuzzer/Android.bp
+++ b/media/libmediaplayerservice/fuzzer/Android.bp
@@ -60,15 +60,51 @@
     static_libs: [
         "libstagefright_rtsp",
         "libbase",
+        "libstagefright_nuplayer",
+        "libplayerservice_datasource",
+        "libstagefright_timedtext",
+        "libaudioprocessing_base",
     ],
     shared_libs: [
+        "android.hardware.media.omx@1.0",
         "av-types-aidl-cpp",
         "media_permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
+        "libactivitymanager_aidl",
         "libandroid_net",
+        "libaudioclient",
         "libcamera_client",
+        "libcodec2_client",
+        "libcrypto",
+        "libdatasource",
+        "libdrmframework",
         "libgui",
+        "libhidlbase",
+        "liblog",
+        "libmedia_codeclist",
+        "libmedia_omx",
+        "libmediadrm",
         "libmediametrics",
+        "libmediautils",
+        "libmemunreachable",
+        "libnetd_client",
+        "libpowermanager",
+        "libstagefright_httplive",
+        "packagemanager_aidl-cpp",
+        "libfakeservicemanager",
+        "libvibrator",
+        "libnbaio",
+        "libnblog",
+        "libpowermanager",
+        "libaudioprocessing",
+        "libaudioflinger",
+        "libresourcemanagerservice",
+        "libmediametricsservice",
+        "mediametricsservice-aidl-cpp",
+    ],
+    header_libs: [
+        "libaudiohal_headers",
+        "libaudioflinger_headers",
     ],
 }
 
diff --git a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
index b0040fe..4f2da67 100644
--- a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
+++ b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
@@ -18,6 +18,10 @@
 #include <media/stagefright/foundation/AString.h>
 #include "fuzzer/FuzzedDataProvider.h"
 
+#include <AudioFlinger.h>
+#include <MediaPlayerService.h>
+#include <ResourceManagerService.h>
+#include <ServiceManager.h>
 #include <StagefrightRecorder.h>
 #include <camera/Camera.h>
 #include <camera/android/hardware/ICamera.h>
@@ -25,6 +29,7 @@
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
 #include <media/stagefright/PersistentSurface.h>
+#include <mediametricsservice/MediaMetricsService.h>
 #include <thread>
 
 using namespace std;
@@ -305,6 +310,21 @@
     mStfRecorder->reset();
 }
 
+extern "C" int LLVMFuzzerInitialize(int /* *argc */, char /* ***argv */) {
+    /**
+     * Initialize a fake ServiceManager and register the instances
+     * of all the required services.
+     */
+    sp<IServiceManager> fakeServiceManager = new ServiceManager();
+    setDefaultServiceManager(fakeServiceManager);
+    MediaPlayerService::instantiate();
+    AudioFlinger::instantiate();
+    ResourceManagerService::instantiate();
+    fakeServiceManager->addService(String16(MediaMetricsService::kServiceName),
+                                    new MediaMetricsService());
+    return 0;
+}
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
     MediaRecorderClientFuzzer mrcFuzzer(data, size);
     mrcFuzzer.process();
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 36e4d4a..1358faa 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -992,6 +992,11 @@
         format->setInt32("auto", !!isAutoselect);
         format->setInt32("default", !!isDefault);
         format->setInt32("forced", !!isForced);
+    } else if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+        int32_t hapticChannelCount;
+        if (meta->findInt32(kKeyHapticChannelCount, &hapticChannelCount)) {
+            format->setInt32("haptic-channel-count", hapticChannelCount);
+        }
     }
 
     return format;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 4851684..727d68d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -555,6 +555,13 @@
         reply->writeInt32(isAuto);
         reply->writeInt32(isDefault);
         reply->writeInt32(isForced);
+    } else if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+        int32_t hapticChannelCount;
+        bool hasHapticChannels = format->findInt32("haptic-channel-count", &hapticChannelCount);
+        reply->writeInt32(hasHapticChannels);
+        if (hasHapticChannels) {
+            reply->writeInt32(hapticChannelCount);
+        }
     }
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index 6a17972..fd03150 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -115,7 +115,7 @@
 
         int sockRtp, sockRtcp;
         ARTPConnection::MakeRTPSocketPair(&sockRtp, &sockRtcp, info->mLocalIp, info->mRemoteIp,
-                info->mLocalPort, info->mRemotePort, info->mSocketNetwork);
+                info->mLocalPort, info->mRemotePort, info->mSocketNetwork, info->mRtpSockOptEcn);
 
         sp<AMessage> notify = new AMessage('accu', this);
 
@@ -125,6 +125,8 @@
         mRTPConn->addStream(sockRtp, sockRtcp, desc, i + 1, notify, false);
         mRTPConn->setSelfID(info->mSelfID);
         mRTPConn->setStaticJitterTimeMs(info->mJbTimeMs);
+        mRTPConn->setRtpSockOptEcn(info->mRtpSockOptEcn);
+        mRTPConn->setIsIPv6(info->mLocalIp);
 
         unsigned long PT;
         AString formatDesc, formatParams;
@@ -719,6 +721,8 @@
     } else if (key == "rtp-param-set-socket-network") {
         int64_t networkHandle = atoll(value);
         setSocketNetwork(networkHandle);
+    } else if (key == "rtp-param-set-socket-ecn") {
+        info->mRtpSockOptEcn = atoi(value);
     } else if (key == "rtp-param-jitter-buffer-time") {
         // clamping min at 40, max at 3000
         info->mJbTimeMs = std::min(std::max(40, atoi(value)), 3000);
diff --git a/media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h
index 7d9bb8f..b2afe86 100644
--- a/media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h
+++ b/media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h
@@ -121,6 +121,8 @@
         uint32_t mSelfID;
         /* extmap:<value> for CVO will be set to here */
         int32_t mCVOExtMap;
+        /* Whether ECN is enabled on the RTP socket; set via "rtp-param-set-socket-ecn" */
+        int32_t mRtpSockOptEcn;
 
         /* a copy of TrackInfo in RTSPSource */
         sp<AnotherPacketSource> mSource;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 52c4c0f..d6028d9 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -785,7 +785,7 @@
 
     // we cannot change the number of output buffers while OMX is running
     // set up surface to the same count
-    Vector<BufferInfo> &buffers = mBuffers[kPortIndexOutput];
+    std::vector<BufferInfo> &buffers = mBuffers[kPortIndexOutput];
     ALOGV("setting up surface for %zu buffers", buffers.size());
 
     err = native_window_set_buffer_count(nativeWindow, buffers.size());
@@ -825,7 +825,7 @@
     // cancel undequeued buffers to new surface
     if (!storingMetadataInDecodedBuffers()) {
         for (size_t i = 0; i < buffers.size(); ++i) {
-            BufferInfo &info = buffers.editItemAt(i);
+            BufferInfo &info = buffers[i];
             if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
                 ALOGV("canceling buffer %p", info.mGraphicBuffer->getNativeBuffer());
                 err = nativeWindow->cancelBuffer(
@@ -872,7 +872,7 @@
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
     CHECK(mAllocator[portIndex] == NULL);
-    CHECK(mBuffers[portIndex].isEmpty());
+    CHECK(mBuffers[portIndex].empty());
 
     status_t err;
     if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
@@ -951,6 +951,7 @@
 
             const sp<AMessage> &format =
                     portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
+            mBuffers[portIndex].reserve(def.nBufferCountActual);
             for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
                 hidl_memory hidlMemToken;
                 sp<TMemory> hidlMem;
@@ -1039,7 +1040,7 @@
                     }
                 }
 
-                mBuffers[portIndex].push(info);
+                mBuffers[portIndex].push_back(info);
             }
         }
     }
@@ -1250,6 +1251,7 @@
          mComponentName.c_str(), bufferCount, bufferSize);
 
     // Dequeue buffers and send them to OMX
+    mBuffers[kPortIndexOutput].reserve(bufferCount);
     for (OMX_U32 i = 0; i < bufferCount; i++) {
         ANativeWindowBuffer *buf;
         int fenceFd;
@@ -1275,7 +1277,7 @@
         info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
         info.mCodecData = info.mData;
 
-        mBuffers[kPortIndexOutput].push(info);
+        mBuffers[kPortIndexOutput].push_back(info);
 
         IOMX::buffer_id bufferId;
         err = mOMXNode->useBuffer(kPortIndexOutput, graphicBuffer, &bufferId);
@@ -1285,7 +1287,7 @@
             break;
         }
 
-        mBuffers[kPortIndexOutput].editItemAt(i).mBufferID = bufferId;
+        mBuffers[kPortIndexOutput][i].mBufferID = bufferId;
 
         ALOGV("[%s] Registered graphic buffer with ID %u (pointer = %p)",
              mComponentName.c_str(),
@@ -1307,7 +1309,7 @@
     }
 
     for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
-        BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+        BufferInfo *info = &mBuffers[kPortIndexOutput][i];
         if (info->mStatus == BufferInfo::OWNED_BY_US) {
             status_t error = cancelBufferToNativeWindow(info);
             if (err == 0) {
@@ -1336,6 +1338,7 @@
     ALOGV("[%s] Allocating %u meta buffers on output port",
          mComponentName.c_str(), bufferCount);
 
+    mBuffers[kPortIndexOutput].reserve(bufferCount);
     for (OMX_U32 i = 0; i < bufferCount; i++) {
         BufferInfo info;
         info.mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
@@ -1353,7 +1356,7 @@
         info.mCodecData = info.mData;
 
         err = mOMXNode->useBuffer(kPortIndexOutput, OMXBuffer::sPreset, &info.mBufferID);
-        mBuffers[kPortIndexOutput].push(info);
+        mBuffers[kPortIndexOutput].push_back(info);
 
         ALOGV("[%s] allocated meta buffer with ID %u",
                 mComponentName.c_str(), info.mBufferID);
@@ -1462,7 +1465,7 @@
             it != done.cend(); ++it) {
         ssize_t index = it->getIndex();
         if (index >= 0 && (size_t)index < mBuffers[kPortIndexOutput].size()) {
-            mBuffers[kPortIndexOutput].editItemAt(index).mRenderInfo = NULL;
+            mBuffers[kPortIndexOutput][index].mRenderInfo = NULL;
         } else if (index >= 0) {
             // THIS SHOULD NEVER HAPPEN
             ALOGE("invalid index %zd in %zu", index, mBuffers[kPortIndexOutput].size());
@@ -1502,7 +1505,7 @@
         bool stale = false;
         for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
             i--;
-            BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+            BufferInfo *info = &mBuffers[kPortIndexOutput][i];
 
             if (info->mGraphicBuffer != NULL &&
                     info->mGraphicBuffer->handle == buf->handle) {
@@ -1550,8 +1553,7 @@
     BufferInfo *oldest = NULL;
     for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
         i--;
-        BufferInfo *info =
-            &mBuffers[kPortIndexOutput].editItemAt(i);
+        BufferInfo *info = &mBuffers[kPortIndexOutput][i];
         if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW &&
             (oldest == NULL ||
              // avoid potential issues from counter rolling over
@@ -1608,8 +1610,7 @@
     status_t err = OK;
     for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
         i--;
-        BufferInfo *info =
-            &mBuffers[kPortIndexOutput].editItemAt(i);
+        BufferInfo *info = &mBuffers[kPortIndexOutput][i];
 
         // At this time some buffers may still be with the component
         // or being drained.
@@ -1626,7 +1627,7 @@
 }
 
 status_t ACodec::freeBuffer(OMX_U32 portIndex, size_t i) {
-    BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
+    BufferInfo *info = &mBuffers[portIndex][i];
     status_t err = OK;
 
     // there should not be any fences in the metadata
@@ -1666,14 +1667,14 @@
     }
 
     // remove buffer even if mOMXNode->freeBuffer fails
-    mBuffers[portIndex].removeAt(i);
+    mBuffers[portIndex].erase(mBuffers[portIndex].begin() + i);
     return err;
 }
 
 ACodec::BufferInfo *ACodec::findBufferByID(
         uint32_t portIndex, IOMX::buffer_id bufferID, ssize_t *index) {
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
-        BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
+        BufferInfo *info = &mBuffers[portIndex][i];
 
         if (info->mBufferID == bufferID) {
             if (index != NULL) {
@@ -5102,7 +5103,7 @@
     size_t n = 0;
 
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
-        const BufferInfo &info = mBuffers[portIndex].itemAt(i);
+        const BufferInfo &info = mBuffers[portIndex][i];
 
         if (info.mStatus == BufferInfo::OWNED_BY_COMPONENT) {
             ++n;
@@ -5116,7 +5117,7 @@
     size_t n = 0;
 
     for (size_t i = 0; i < mBuffers[kPortIndexOutput].size(); ++i) {
-        const BufferInfo &info = mBuffers[kPortIndexOutput].itemAt(i);
+        const BufferInfo &info = mBuffers[kPortIndexOutput][i];
 
         if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
             ++n;
@@ -5143,7 +5144,7 @@
 bool ACodec::allYourBuffersAreBelongToUs(
         OMX_U32 portIndex) {
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
-        BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
+        BufferInfo *info = &mBuffers[portIndex][i];
 
         if (info->mStatus != BufferInfo::OWNED_BY_US
                 && info->mStatus != BufferInfo::OWNED_BY_NATIVE_WINDOW) {
@@ -5167,12 +5168,11 @@
 }
 
 void ACodec::processDeferredMessages() {
-    List<sp<AMessage> > queue = mDeferredQueue;
+    std::list<sp<AMessage>> queue = mDeferredQueue;
     mDeferredQueue.clear();
 
-    List<sp<AMessage> >::iterator it = queue.begin();
-    while (it != queue.end()) {
-        onMessageReceived(*it++);
+    for (const sp<AMessage> &msg : queue) {
+        onMessageReceived(msg);
     }
 }
 
@@ -5928,19 +5928,17 @@
 
         case ACodec::kWhatSetSurface:
         {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
             sp<RefBase> obj;
             CHECK(msg->findObject("surface", &obj));
 
             status_t err = mCodec->handleSetSurface(static_cast<Surface *>(obj.get()));
 
-            sp<AReplyToken> replyID;
-            if (msg->senderAwaitsResponse(&replyID)) {
-                sp<AMessage> response = new AMessage;
-                response->setInt32("err", err);
-                response->postReply(replyID);
-            } else if (err != OK) {
-                mCodec->signalError(OMX_ErrorUndefined, err);
-            }
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
             break;
         }
 
@@ -6483,7 +6481,7 @@
     BufferInfo *eligible = NULL;
 
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); ++i) {
-        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput][i];
 
 #if 0
         if (info->mStatus == BufferInfo::OWNED_BY_UPSTREAM) {
@@ -7515,7 +7513,7 @@
     // submit as many buffers as there are input buffers with the codec
     // in case we are in port reconfiguring
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); ++i) {
-        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput][i];
 
         if (info->mStatus == BufferInfo::OWNED_BY_COMPONENT) {
             if (mCodec->submitOutputMetadataBuffer() != OK)
@@ -7533,7 +7531,7 @@
 void ACodec::ExecutingState::submitRegularOutputBuffers() {
     bool failed = false;
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexOutput].size(); ++i) {
-        BufferInfo *info = &mCodec->mBuffers[kPortIndexOutput].editItemAt(i);
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexOutput][i];
 
         if (mCodec->mNativeWindow != NULL) {
             if (info->mStatus != BufferInfo::OWNED_BY_US
@@ -7590,7 +7588,7 @@
     }
 
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
-        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput][i];
         if (info->mStatus == BufferInfo::OWNED_BY_US) {
             postFillThisBuffer(info);
         }
@@ -8529,17 +8527,11 @@
 
         case kWhatSetSurface:
         {
-            ALOGV("[%s] Deferring setSurface", mCodec->mComponentName.c_str());
-
-            sp<AReplyToken> replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
+            ALOGD("[%s] Deferring setSurface from OutputPortSettingsChangedState",
+                  mCodec->mComponentName.c_str());
 
             mCodec->deferMessage(msg);
 
-            sp<AMessage> response = new AMessage;
-            response->setInt32("err", OK);
-            response->postReply(replyID);
-
             handled = true;
             break;
         }
@@ -8594,7 +8586,7 @@
                 ALOGV("[%s] Output port now disabled.", mCodec->mComponentName.c_str());
 
                 status_t err = OK;
-                if (!mCodec->mBuffers[kPortIndexOutput].isEmpty()) {
+                if (!mCodec->mBuffers[kPortIndexOutput].empty()) {
                     ALOGE("disabled port should be empty, but has %zu buffers",
                             mCodec->mBuffers[kPortIndexOutput].size());
                     err = FAILED_TRANSACTION;
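The ACodec hunks above are a mechanical container migration; buffer ownership and lifetime are unchanged. As a reference, a small side-by-side sketch of the idioms being swapped (the BufferInfo struct here is a stand-in, not ACodec's):

```cpp
#include <vector>
#include <utils/Vector.h>

struct BufferInfo { int mBufferID = 0; };  // stand-in only

void containerIdioms() {
    // android::Vector idioms used before this change...
    android::Vector<BufferInfo> before;
    before.push(BufferInfo{});              // append
    before.editItemAt(0).mBufferID = 7;     // mutable element access
    bool b1 = before.isEmpty();
    before.removeAt(0);                     // erase by index

    // ...and the std::vector equivalents used after it.
    std::vector<BufferInfo> after;
    after.reserve(4);                       // new: pre-size to the buffer count
    after.push_back(BufferInfo{});
    after[0].mBufferID = 7;
    bool b2 = after.empty();
    after.erase(after.begin() + 0);
    (void)b1; (void)b2;
}
```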
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index e4ba0b8..8c469df 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -321,7 +321,6 @@
         "libstagefright_webm",
         "libstagefright_timedtext",
         "libogg",
-        "libwebm",
         "libstagefright_id3",
         "framework-permission-aidl-cpp",
         "libmediandk_format",
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index a78fb30..c93d033 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -157,6 +157,8 @@
     bool isHevc() const { return mIsHevc; }
     bool isAv1() const { return mIsAv1; }
     bool isHeic() const { return mIsHeic; }
+    bool isAvif() const { return mIsAvif; }
+    bool isHeif() const { return mIsHeif; }
     bool isAudio() const { return mIsAudio; }
     bool isMPEG4() const { return mIsMPEG4; }
     bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic || mIsDovi; }
@@ -325,6 +327,8 @@
     bool mIsAudio;
     bool mIsVideo;
     bool mIsHeic;
+    bool mIsAvif;
+    bool mIsHeif;
     bool mIsMPEG4;
     bool mGotStartKeyFrame;
     bool mIsMalformed;
@@ -550,6 +554,7 @@
     mStreamableFile = false;
     mTimeScale = -1;
     mHasFileLevelMeta = false;
+    mIsAvif = false;
     mFileLevelMetaDataSize = 0;
     mPrimaryItemId = 0;
     mAssociationEntryCount = 0;
@@ -670,6 +675,8 @@
         return "mett";
     } else if (!strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
         return "heic";
+    } else if (!strcasecmp(MEDIA_MIMETYPE_IMAGE_AVIF, mime)) {
+        return "avif";
     } else {
         ALOGE("Track (%s) other than video/audio/metadata is not supported", mime);
     }
@@ -714,8 +721,9 @@
     Track *track = new Track(this, source, 1 + mTracks.size());
     mTracks.push_back(track);
 
-    mHasMoovBox |= !track->isHeic();
-    mHasFileLevelMeta |= track->isHeic();
+    mHasMoovBox |= !track->isHeif();
+    mHasFileLevelMeta |= track->isHeif();
+    mIsAvif |= track->isAvif();
 
     return OK;
 }
@@ -797,7 +805,7 @@
 
     for (List<Track *>::iterator it = mTracks.begin();
          it != mTracks.end(); ++it) {
-        if ((*it)->isHeic()) {
+        if ((*it)->isHeif()) {
             metaSize += (*it)->getMetaSizeIncrease(rotation, mTracks.size());
         }
     }
@@ -999,8 +1007,8 @@
         return err;
     }
 
-    ALOGV("muxer starting: mHasMoovBox %d, mHasFileLevelMeta %d",
-            mHasMoovBox, mHasFileLevelMeta);
+    ALOGV("muxer starting: mHasMoovBox %d, mHasFileLevelMeta %d, mIsAvif %d",
+            mHasMoovBox, mHasFileLevelMeta, mIsAvif);
 
     err = startWriterThread();
     if (err != OK) {
@@ -1316,7 +1324,7 @@
         }
 
         // skip image tracks
-        if ((*it)->isHeic()) continue;
+        if ((*it)->isHeif()) continue;
         nonImageTrackCount++;
 
         int64_t durationUs = (*it)->getDurationUs();
@@ -1494,7 +1502,7 @@
     int64_t minCttsOffsetTimeUs = kMaxCttsOffsetTimeUs;
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
-        if (!(*it)->isHeic()) {
+        if (!(*it)->isHeif()) {
             minCttsOffsetTimeUs =
                 std::min(minCttsOffsetTimeUs, (*it)->getMinCttsOffsetTimeUs());
         }
@@ -1510,7 +1518,7 @@
 
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
-        if (!(*it)->isHeic()) {
+        if (!(*it)->isHeif()) {
             (*it)->writeTrackHeader();
         }
     }
@@ -1530,17 +1538,27 @@
         writeFourcc("isom");
         writeFourcc("3gp4");
     } else {
-        // Only write "heic" as major brand if the client specified HEIF
-        // AND we indeed receive some image heic tracks.
+        // Only write "heic"/"avif" as major brand if the client specified HEIF/AVIF
+        // AND we indeed receive some image heic/avif tracks.
         if (fileType == OUTPUT_FORMAT_HEIF && mHasFileLevelMeta) {
-            writeFourcc("heic");
+            if (mIsAvif) {
+                writeFourcc("avif");
+            } else {
+                writeFourcc("heic");
+            }
         } else {
             writeFourcc("mp42");
         }
         writeInt32(0);
         if (mHasFileLevelMeta) {
-            writeFourcc("mif1");
-            writeFourcc("heic");
+            if (mIsAvif) {
+                writeFourcc("mif1");
+                writeFourcc("miaf");
+                writeFourcc("avif");
+            } else {
+                writeFourcc("mif1");
+                writeFourcc("heic");
+            }
         }
         if (mHasMoovBox) {
             writeFourcc("isom");
@@ -2117,7 +2135,8 @@
 
     for (List<Track *>::iterator it = mTracks.begin();
          it != mTracks.end(); ++it) {
-        if (!(*it)->isHeic() && (*it)->getDurationUs() >= mMaxFileDurationLimitUs) {
+        if (!(*it)->isHeif() &&
+                (*it)->getDurationUs() >= mMaxFileDurationLimitUs) {
             return true;
         }
     }
@@ -2224,6 +2243,8 @@
     mIsAudio = !strncasecmp(mime, "audio/", 6);
     mIsVideo = !strncasecmp(mime, "video/", 6);
     mIsHeic = !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+    mIsAvif = !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_AVIF);
+    mIsHeif = mIsHeic || mIsAvif;
     mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
 
@@ -2235,7 +2256,7 @@
         }
     }
 
-    if (!mIsHeic) {
+    if (!mIsHeif) {
         setTimeScale();
     } else {
         CHECK(mMeta->findInt32(kKeyWidth, &mWidth) && (mWidth > 0));
@@ -2316,7 +2337,7 @@
 
 void MPEG4Writer::Track::updateTrackSizeEstimate() {
     mEstimatedTrackSizeBytes = mMdatSizeBytes;  // media data size
-    if (!isHeic() && !mOwner->isFileStreamable()) {
+    if (!isHeif() && !mOwner->isFileStreamable()) {
         mEstimatedTrackSizeBytes += trackMetaDataSize();
     }
 }
@@ -2399,7 +2420,7 @@
 
 bool MPEG4Writer::Track::isExifData(
         MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const {
-    if (!mIsHeic) {
+    if (!mIsHeif) {
         return false;
     }
 
@@ -2428,12 +2449,12 @@
 }
 
 void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
-    CHECK(!mIsHeic);
+    CHECK(!mIsHeif);
     mCo64TableEntries->add(hton64(offset));
 }
 
 void MPEG4Writer::Track::addItemOffsetAndSize(off64_t offset, size_t size, bool isExif) {
-    CHECK(mIsHeic);
+    CHECK(mIsHeif);
 
     if (offset > UINT32_MAX || size > UINT32_MAX) {
         ALOGE("offset or size is out of range: %lld, %lld",
@@ -2479,8 +2500,10 @@
 
     if (mProperties.empty()) {
         mProperties.push_back(mOwner->addProperty_l({
-            .type = FOURCC('h', 'v', 'c', 'C'),
-            .hvcc = ABuffer::CreateAsCopy(mCodecSpecificData, mCodecSpecificDataSize)
+            .type = static_cast<uint32_t>(mIsAvif ?
+                  FOURCC('a', 'v', '1', 'C') :
+                  FOURCC('h', 'v', 'c', 'C')),
+            .data = ABuffer::CreateAsCopy(mCodecSpecificData, mCodecSpecificDataSize)
         }));
 
         mProperties.push_back(mOwner->addProperty_l({
@@ -2500,7 +2523,7 @@
     mTileIndex++;
     if (hasGrid) {
         mDimgRefs.value.push_back(mOwner->addItem_l({
-            .itemType = "hvc1",
+            .itemType = mIsAvif ? "av01" : "hvc1",
             .itemId = mItemIdBase++,
             .isPrimary = false,
             .isHidden = true,
@@ -2536,7 +2559,7 @@
         }
     } else {
         mImageItemId = mOwner->addItem_l({
-            .itemType = "hvc1",
+            .itemType = mIsAvif ? "av01" : "hvc1",
             .itemId = mItemIdBase++,
             .isPrimary = (mIsPrimary != 0),
             .isHidden = false,
@@ -2553,7 +2576,7 @@
 // it affects the 'dimg' refs for tiled image, as we only have the refs after the
 // last tile sample is written.
 void MPEG4Writer::Track::flushItemRefs() {
-    CHECK(mIsHeic);
+    CHECK(mIsHeif);
 
     if (mImageItemId > 0) {
         mOwner->addRefs_l(mImageItemId, mDimgRefs);
@@ -2654,7 +2677,8 @@
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
         mMeta->findData(kKeyHVCC, &type, &data, &size);
-    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1) ||
+               !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_AVIF)) {
         mMeta->findData(kKeyAV1C, &type, &data, &size);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
         getDolbyVisionProfile();
@@ -2766,7 +2790,7 @@
         size_t bytesWritten;
         off64_t offset = addSample_l(*it, usePrefix, tiffHdrOffset, &bytesWritten);
 
-        if (chunk->mTrack->isHeic()) {
+        if (chunk->mTrack->isHeif()) {
             chunk->mTrack->addItemOffsetAndSize(offset, bytesWritten, isExif);
         } else if (isFirstSample) {
             chunk->mTrack->addChunkOffset(offset);
@@ -2918,11 +2942,11 @@
     mStartTimeRealUs = startTimeUs;
 
     int32_t rotationDegrees;
-    if ((mIsVideo || mIsHeic) && params &&
+    if ((mIsVideo || mIsHeif) && params &&
             params->findInt32(kKeyRotation, &rotationDegrees)) {
         mRotation = rotationDegrees;
     }
-    if (mIsHeic) {
+    if (mIsHeif) {
         // Reserve the item ids, so that the item ids are ordered in the same
         // order that the image tracks are added.
         // If we leave the item ids to be assigned when the sample is written out,
@@ -3598,7 +3622,7 @@
         }
 
         // Per-frame metadata sample's size must be smaller than max allowed.
-        if (!mIsVideo && !mIsAudio && !mIsHeic &&
+        if (!mIsVideo && !mIsAudio && !mIsHeif &&
                 buffer->range_length() >= kMaxMetadataSize) {
             ALOGW("Buffer size is %zu. Maximum metadata buffer size is %lld for %s track",
                     buffer->range_length(), (long long)kMaxMetadataSize, trackName);
@@ -3722,7 +3746,7 @@
             mGotStartKeyFrame = true;
         }
 ////////////////////////////////////////////////////////////////////////////////
-        if (!mIsHeic) {
+        if (!mIsHeif) {
             if (mStszTableEntries->count() == 0) {
                 mFirstSampleTimeRealUs = systemTime() / 1000;
                 if (timestampUs < 0 && mFirstSampleStartOffsetUs == 0) {
@@ -3942,7 +3966,7 @@
             off64_t offset = mOwner->addSample_l(
                     copy, usePrefix, tiffHdrOffset, &bytesWritten);
 
-            if (mIsHeic) {
+            if (mIsHeif) {
                 addItemOffsetAndSize(offset, bytesWritten, isExif);
             } else {
                 if (mCo64TableEntries->count() == 0) {
@@ -3955,7 +3979,7 @@
         }
 
         mChunkSamples.push_back(copy);
-        if (mIsHeic) {
+        if (mIsHeif) {
             bufferChunk(0 /*timestampUs*/);
             ++nChunks;
         } else if (interleaveDurationUs == 0) {
@@ -3993,7 +4017,7 @@
 
     // Add final entries only for non-empty tracks.
     if (mStszTableEntries->count() > 0) {
-        if (mIsHeic) {
+        if (mIsHeif) {
             if (!mChunkSamples.empty()) {
                 bufferChunk(0);
                 ++nChunks;
@@ -4066,7 +4090,7 @@
         mOwner->mStartMeta->findInt32(kKeyEmptyTrackMalFormed, &emptyTrackMalformed) &&
         emptyTrackMalformed) {
         // MediaRecorder(sets kKeyEmptyTrackMalFormed by default) report empty tracks as malformed.
-        if (!mIsHeic && mStszTableEntries->count() == 0) {  // no samples written
+        if (!mIsHeif && mStszTableEntries->count() == 0) {  // no samples written
             ALOGE("The number of recorded samples is 0");
             mIsMalformed = true;
             return true;
@@ -4229,7 +4253,7 @@
 
 int32_t MPEG4Writer::Track::getMetaSizeIncrease(
         int32_t angle, int32_t trackCount) const {
-    CHECK(mIsHeic);
+    CHECK(mIsHeif);
 
     int32_t grid = (mTileWidth > 0);
     int32_t rotate = (angle > 0);
@@ -4281,7 +4305,8 @@
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime) ||
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_AV1, mime) ||
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime) ||
-        !strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
+        !strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime) ||
+        !strcasecmp(MEDIA_MIMETYPE_IMAGE_AVIF, mime)) {
         if (!mCodecSpecificData ||
             mCodecSpecificDataSize <= 0) {
             ALOGE("Missing codec specific data");
@@ -4300,7 +4325,7 @@
 const char *MPEG4Writer::Track::getTrackType() const {
     return mIsAudio ? "Audio" :
            mIsVideo ? "Video" :
-           mIsHeic  ? "Image" :
+           mIsHeif  ? "Image" :
                       "Metadata";
 }
 
@@ -5413,7 +5438,7 @@
             case FOURCC('h', 'v', 'c', 'C'):
             {
                 beginBox("hvcC");
-                sp<ABuffer> hvcc = mProperties[propIndex].hvcc;
+                sp<ABuffer> hvcc = mProperties[propIndex].data;
                 // Patch avcc's lengthSize field to match the number
                 // of bytes we use to indicate the size of a nal unit.
                 uint8_t *ptr = (uint8_t *)hvcc->data();
@@ -5422,6 +5447,14 @@
                 endBox();
                 break;
             }
+            case FOURCC('a', 'v', '1', 'C'):
+            {
+                beginBox("av1C");
+                sp<ABuffer> av1c = mProperties[propIndex].data;
+                write(av1c->data(), av1c->size());
+                endBox();
+                break;
+            }
             case FOURCC('i', 's', 'p', 'e'):
             {
                 beginBox("ispe");
@@ -5525,7 +5558,7 @@
 
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
-        if ((*it)->isHeic()) {
+        if ((*it)->isHeif()) {
             (*it)->flushItemRefs();
         }
     }
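For quick reference, a sketch of the brand selection the ftyp changes above produce for image-only output under OUTPUT_FORMAT_HEIF (the helper is hypothetical; the brand lists are taken from the diff):

```cpp
#include <string>
#include <vector>

// Hypothetical helper mirroring the writeFtypBox() branch added above:
// the major brand followed by the compatible brands for file-level meta.
std::vector<std::string> imageFtypBrands(bool isAvif) {
    if (isAvif) {
        return {"avif", /* compatible: */ "mif1", "miaf", "avif"};
    }
    return {"heic", /* compatible: */ "mif1", "heic"};
}
```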
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
index 21dcfa1..2d9c651 100644
--- a/media/libstagefright/MediaAppender.cpp
+++ b/media/libstagefright/MediaAppender.cpp
@@ -308,7 +308,11 @@
         ALOGE("MediaAppender::start() is called in invalid state %d", mState);
         return INVALID_OPERATION;
     }
-    mMuxer = new (std::nothrow) MediaMuxer(mFd, mFormat);
+    mMuxer = MediaMuxer::create(mFd, mFormat);
+    if (mMuxer == nullptr) {
+        ALOGE("MediaMuxer::create failed");
+        return INVALID_OPERATION;
+    }
     for (const auto& n : mFmtIndexMap) {
         ssize_t muxIndex = mMuxer->addTrack(n.second);
         if (muxIndex < 0) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c963e19..ad6e36a 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -3355,8 +3355,8 @@
                     MediaCodecInfo::Attributes attr = mCodecInfo
                             ? mCodecInfo->getAttributes()
                             : MediaCodecInfo::Attributes(0);
-                    if (!(attr & MediaCodecInfo::kFlagIsSoftwareOnly)) {
-                        // software codec is currently ignored.
+                    if (mDomain == DOMAIN_VIDEO || !(attr & MediaCodecInfo::kFlagIsSoftwareOnly)) {
+                        // software audio codecs are currently ignored.
                         mResourceManagerProxy->addResource(MediaResource::CodecResource(
                             mFlags & kFlagIsSecure, toMediaResourceSubType(mDomain)));
                     }
@@ -5289,7 +5289,7 @@
 MediaCodec::BufferInfo *MediaCodec::peekNextPortBuffer(int32_t portIndex) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
-    List<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
+    std::list<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
 
     if (availBuffers->empty()) {
         return nullptr;
@@ -5306,7 +5306,7 @@
         return -EAGAIN;
     }
 
-    List<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
+    std::list<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
     size_t index = *availBuffers->begin();
     CHECK_EQ(info, &mPortBuffers[portIndex][index]);
     availBuffers->erase(availBuffers->begin());
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index a3040f4..78b7288 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -354,8 +354,19 @@
 
 //static
 void MediaCodecList::findMatchingCodecs(
-        const char *mime, bool encoder, uint32_t flags, sp<AMessage> format,
+        const char *mime, bool encoder, uint32_t flags, const sp<AMessage> &format,
         Vector<AString> *matches) {
+    findMatchingCodecs(mime, encoder, flags, format, matches, /* checkProfile= */ true);
+    if (matches->empty()) {
+        ALOGV("no matching codec found, retrying without profile check");
+        findMatchingCodecs(mime, encoder, flags, format, matches, /* checkProfile= */ false);
+    }
+}
+
+//static
+void MediaCodecList::findMatchingCodecs(
+        const char *mime, bool encoder, uint32_t flags, const sp<AMessage> &format,
+        Vector<AString> *matches, bool checkProfile) {
     matches->clear();
 
     const sp<IMediaCodecList> list = getInstance();
@@ -379,7 +390,7 @@
 
         AString componentName = info->getCodecName();
 
-        if (!codecHandlesFormat(mime, info, format)) {
+        if (!codecHandlesFormat(mime, info, format, checkProfile)) {
             ALOGV("skipping codec '%s' which doesn't satisfy format %s",
                   componentName.c_str(), format->debugString(2).c_str());
             continue;
@@ -400,9 +411,10 @@
     }
 }
 
-/*static*/
-bool MediaCodecList::codecHandlesFormat(const char *mime, sp<MediaCodecInfo> info,
-                                        sp<AMessage> format) {
+// static
+bool MediaCodecList::codecHandlesFormat(
+        const char *mime, const sp<MediaCodecInfo> &info, const sp<AMessage> &format,
+        bool checkProfile) {
 
     if (format == nullptr) {
         ALOGD("codecHandlesFormat: no format, so no extra checks");
@@ -510,9 +522,7 @@
         }
 
         int32_t profile = -1;
-        if (format->findInt32("profile", &profile)) {
-            int32_t level = -1;
-            format->findInt32("level", &level);
+        if (checkProfile && format->findInt32("profile", &profile)) {
             Vector<MediaCodecInfo::ProfileLevel> profileLevels;
             capabilities->getSupportedProfileLevels(&profileLevels);
             auto it = profileLevels.begin();
@@ -520,14 +530,11 @@
                 if (profile != it->mProfile) {
                     continue;
                 }
-                if (level > -1 && level > it->mLevel) {
-                    continue;
-                }
                 break;
             }
 
             if (it == profileLevels.end()) {
-                ALOGV("Codec does not support profile %d with level %d", profile, level);
+                ALOGV("Codec does not support profile %d", profile);
                 return false;
             }
         }
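The retry added to findMatchingCodecs() amounts to a strict pass followed by a lenient one. A minimal generic sketch of that fallback shape (the query callback is a stand-in for the per-codec codecHandlesFormat() check):

```cpp
#include <functional>
#include <string>
#include <vector>

// Stand-in for the codec query driven by MediaCodecList; illustrative only.
using CodecQuery = std::function<std::vector<std::string>(bool checkProfile)>;

std::vector<std::string> findCodecsWithFallback(const CodecQuery& query) {
    // First pass: honor the requested profile.
    std::vector<std::string> matches = query(/* checkProfile= */ true);
    if (matches.empty()) {
        // Second pass: drop the profile constraint so callers still get a
        // codec for the MIME type, mirroring the retry added above.
        matches = query(/* checkProfile= */ false);
    }
    return matches;
}
```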
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index a946f71..9768f97 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -46,6 +46,30 @@
            format == MediaMuxer::OUTPUT_FORMAT_HEIF;
 }
 
+MediaMuxer* MediaMuxer::create(int fd, OutputFormat format) {
+    bool isInputValid = true;
+    if (isMp4Format(format)) {
+        isInputValid = MPEG4Writer::isFdOpenModeValid(fd);
+    } else if (format == OUTPUT_FORMAT_WEBM) {
+        isInputValid = WebmWriter::isFdOpenModeValid(fd);
+    } else if (format == OUTPUT_FORMAT_OGG) {
+        isInputValid = OggWriter::isFdOpenModeValid(fd);
+    } else {
+        ALOGE("MediaMuxer does not support output format %d", format);
+        return nullptr;
+    }
+    if (!isInputValid) {
+        ALOGE("File descriptor is not suitable for format %d", format);
+        return nullptr;
+    }
+
+    MediaMuxer *muxer = new (std::nothrow) MediaMuxer(fd, (MediaMuxer::OutputFormat)format);
+    if (muxer == nullptr) {
+        ALOGE("Failed to create MediaMuxer object");
+    }
+    return muxer;
+}
+
 MediaMuxer::MediaMuxer(int fd, OutputFormat format)
     : mFormat(format),
       mState(UNINITIALIZED) {
@@ -138,7 +162,7 @@
         return INVALID_OPERATION;
     }
     if (!isMp4Format(mFormat)) {
-        ALOGE("setLocation() is only supported for .mp4, .3gp or .heic output.");
+        ALOGE("setLocation() is only supported for .mp4, .3gp, .heic or .avif output.");
         return INVALID_OPERATION;
     }
 
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index fd30b35..c5b5199 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -796,6 +796,8 @@
         { "valid-samples", kKeyValidSamples },
         { "dvb-component-tag", kKeyDvbComponentTag},
         { "dvb-audio-description", kKeyDvbAudioDescription},
+        { "dvb-teletext-magazine-number", kKeyDvbTeletextMagazineNumber},
+        { "dvb-teletext-page-number", kKeyDvbTeletextPageNumber},
     }
 };
 
@@ -1014,6 +1016,16 @@
         msg->setInt32("dvb-audio-description", dvbAudioDescription);
     }
 
+    int32_t dvbTeletextMagazineNumber = 0;
+    if (meta->findInt32(kKeyDvbTeletextMagazineNumber, &dvbTeletextMagazineNumber)) {
+        msg->setInt32("dvb-teletext-magazine-number", dvbTeletextMagazineNumber);
+    }
+
+    int32_t dvbTeletextPageNumber = 0;
+    if (meta->findInt32(kKeyDvbTeletextPageNumber, &dvbTeletextPageNumber)) {
+        msg->setInt32("dvb-teletext-page-number", dvbTeletextPageNumber);
+    }
+
     const char *lang;
     if (meta->findCString(kKeyMediaLanguage, &lang)) {
         msg->setString("language", lang);
@@ -1810,6 +1822,16 @@
         meta->setInt32(kKeyDvbAudioDescription, dvbAudioDescription);
     }
 
+    int32_t dvbTeletextMagazineNumber = 0;
+    if (msg->findInt32("dvb-teletext-magazine-number", &dvbTeletextMagazineNumber)) {
+        meta->setInt32(kKeyDvbTeletextMagazineNumber, dvbTeletextMagazineNumber);
+    }
+
+    int32_t dvbTeletextPageNumber = 0;
+    if (msg->findInt32("dvb-teletext-page-number", &dvbTeletextPageNumber)) {
+        meta->setInt32(kKeyDvbTeletextPageNumber, dvbTeletextPageNumber);
+    }
+
     AString lang;
     if (msg->findString("language", &lang)) {
         meta->setCString(kKeyMediaLanguage, lang.c_str());
diff --git a/media/libstagefright/colorconversion/fuzzer/Android.bp b/media/libstagefright/colorconversion/fuzzer/Android.bp
new file mode 100644
index 0000000..76b054a
--- /dev/null
+++ b/media/libstagefright/colorconversion/fuzzer/Android.bp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_media_libstagefright_colorconversion_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: [
+        "frameworks_av_media_libstagefright_colorconversion_license",
+    ],
+}
+
+cc_defaults {
+    name: "libcolorconversion_fuzzer_defaults",
+    static_libs: [
+        "libyuv_static",
+        "libstagefright_color_conversion",
+        "libstagefright",
+        "liblog",
+    ],
+    header_libs: [
+        "libstagefright_headers",
+        "libgui_headers",
+    ],
+    shared_libs: [
+        "libui",
+        "libnativewindow",
+        "libstagefright_codecbase",
+        "libstagefright_foundation",
+        "libutils",
+        "libgui",
+        "libbinder",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
+
+cc_fuzz {
+    name: "color_conversion_fuzzer",
+    srcs: [
+        "color_conversion_fuzzer.cpp",
+    ],
+    defaults: [
+         "libcolorconversion_fuzzer_defaults",
+        "libcolorconversion_fuzzer_defaults",
+}
diff --git a/media/libstagefright/colorconversion/fuzzer/README.md b/media/libstagefright/colorconversion/fuzzer/README.md
new file mode 100644
index 0000000..220f749
--- /dev/null
+++ b/media/libstagefright/colorconversion/fuzzer/README.md
@@ -0,0 +1,28 @@
+# Fuzzers for libstagefright_color_conversion
+
+## Table of contents
++ [color_conversion_fuzzer](#ColorConversion)
+
+
+# <a name="ColorConversion"></a> Fuzzer for ColorConversion
+
+ColorConversion supports the following parameters:
+1. SrcColorFormatType (parameter name: "kSrcFormatType")
+2. DstColorFormatType (parameter name: "kDstFormatType")
+
+| Parameter | Valid Values | Configured Value |
+| ------------- | ------------- | ----- |
+|`kSrcFormatType`| 0. `OMX_COLOR_FormatYUV420Planar`<br/>1. `OMX_COLOR_FormatYUV420Planar16`<br/>2. `OMX_COLOR_FormatYUV420SemiPlanar`<br/>3. `OMX_TI_COLOR_FormatYUV420PackedSemiPlanar`<br/>4. `OMX_COLOR_FormatCbYCrY`<br/>5. `OMX_QCOM_COLOR_FormatYVU420SemiPlanar`<br/>6. `COLOR_FormatYUVP010`|Value obtained from FuzzedDataProvider|
+|`kDstFormatType`| 0. `OMX_COLOR_Format16bitRGB565`<br/>1. `OMX_COLOR_Format32BitRGBA8888`<br/>2. `OMX_COLOR_Format32bitBGRA8888`<br/>3. `OMX_COLOR_FormatYUV444Y410`<br/>4. `COLOR_Format32bitABGR2101010`|Value obtained from FuzzedDataProvider|
+
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) color_conversion_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/color_conversion_fuzzer/color_conversion_fuzzer
+```
diff --git a/media/libstagefright/colorconversion/fuzzer/color_conversion_fuzzer.cpp b/media/libstagefright/colorconversion/fuzzer/color_conversion_fuzzer.cpp
new file mode 100644
index 0000000..7c2bfe5
--- /dev/null
+++ b/media/libstagefright/colorconversion/fuzzer/color_conversion_fuzzer.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <iostream>
+#include <vector>
+#include "fuzzer/FuzzedDataProvider.h"
+
+using namespace android;
+using ::android::sp;
+
+static constexpr int32_t kMinFrameSize = 2;
+static constexpr int32_t kMaxFrameSize = 8192;
+
+static constexpr int32_t kSrcFormatType[] = {OMX_COLOR_FormatYUV420Planar,
+                                             OMX_COLOR_FormatYUV420Planar16,
+                                             OMX_COLOR_FormatYUV420SemiPlanar,
+                                             OMX_TI_COLOR_FormatYUV420PackedSemiPlanar,
+                                             OMX_COLOR_FormatCbYCrY,
+                                             OMX_QCOM_COLOR_FormatYVU420SemiPlanar,
+                                             COLOR_FormatYUVP010};
+
+static constexpr int32_t kDstFormatType[] = {
+        OMX_COLOR_Format16bitRGB565, OMX_COLOR_Format32BitRGBA8888, OMX_COLOR_Format32bitBGRA8888,
+        OMX_COLOR_FormatYUV444Y410, COLOR_Format32bitABGR2101010};
+
+class ColorConversionFuzzer {
+  public:
+    ColorConversionFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+
+  private:
+    FuzzedDataProvider mFdp;
+    int32_t getFrameSize(OMX_COLOR_FORMATTYPE colorFormat, int32_t stride, int32_t height);
+    bool isValidFormat(OMX_COLOR_FORMATTYPE srcFormat, OMX_COLOR_FORMATTYPE dstFormat);
+};
+
+int32_t ColorConversionFuzzer::getFrameSize(OMX_COLOR_FORMATTYPE colorFormat, int32_t stride,
+                                          int32_t height) {
+    int32_t frameSize;
+    switch ((int32_t)colorFormat) {
+        case OMX_COLOR_Format16bitRGB565: {
+            frameSize = 2 * stride * height;
+            break;
+        }
+        case OMX_COLOR_FormatYUV420Planar16:
+        case COLOR_FormatYUVP010:
+        case OMX_COLOR_FormatYUV444Y410: {
+            frameSize = 3 * stride * height;
+            break;
+        }
+        case OMX_COLOR_Format32bitBGRA8888:
+        case OMX_COLOR_Format32BitRGBA8888:
+        case COLOR_Format32bitABGR2101010: {
+            frameSize = 4 * stride * height;
+            break;
+        }
+        case OMX_COLOR_FormatYUV420Planar:
+        case OMX_COLOR_FormatYUV420SemiPlanar:
+        case OMX_COLOR_FormatCbYCrY:
+        case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
+        case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
+        default: {
+            frameSize = stride * height + 2 * (((stride + 1) / 2) * ((height + 1) / 2));
+            break;
+        }
+    }
+    return frameSize;
+}
+
+void ColorConversionFuzzer::process() {
+    OMX_COLOR_FORMATTYPE srcColorFormat =
+            static_cast<OMX_COLOR_FORMATTYPE>(mFdp.PickValueInArray(kSrcFormatType));
+    OMX_COLOR_FORMATTYPE dstColorFormat =
+            static_cast<OMX_COLOR_FORMATTYPE>(mFdp.PickValueInArray(kDstFormatType));
+    std::unique_ptr<ColorConverter> converter(new ColorConverter(srcColorFormat, dstColorFormat));
+    if (converter->isValid()) {
+        int32_t srcLeft, srcTop, srcRight, srcBottom, width, height, stride;
+        width = mFdp.ConsumeIntegralInRange<int32_t>(kMinFrameSize, kMaxFrameSize);
+        height = mFdp.ConsumeIntegralInRange<int32_t>(kMinFrameSize, kMaxFrameSize);
+        stride = mFdp.ConsumeIntegralInRange<int32_t>(width, 2 * kMaxFrameSize);
+
+        srcLeft = mFdp.ConsumeIntegralInRange<int32_t>(0, width - 1);
+        srcTop = mFdp.ConsumeIntegralInRange<int32_t>(0, height - 1);
+        srcRight = mFdp.ConsumeIntegralInRange<int32_t>(srcLeft, width - 1);
+        srcBottom = mFdp.ConsumeIntegralInRange<int32_t>(srcTop, height - 1);
+
+        int32_t dstLeft, dstTop, dstRight, dstBottom;
+        dstLeft = mFdp.ConsumeIntegralInRange<int32_t>(0, width - 1);
+        dstTop = mFdp.ConsumeIntegralInRange<int32_t>(0, height - 1);
+        dstRight = mFdp.ConsumeIntegralInRange<int32_t>(dstLeft, width - 1);
+        dstBottom = mFdp.ConsumeIntegralInRange<int32_t>(dstTop, height - 1);
+
+        int32_t srcFrameSize = getFrameSize(srcColorFormat, stride, height);
+        int32_t dstFrameSize = getFrameSize(dstColorFormat, stride, height);
+        std::vector<uint8_t> srcFrame(srcFrameSize), dstFrame(dstFrameSize);
+        mFdp.ConsumeData(srcFrame.data(), srcFrameSize);
+        converter->convert(srcFrame.data(), width, height, stride, srcLeft, srcTop, srcRight,
+                           srcBottom, dstFrame.data(), width, height, stride, dstLeft, dstTop,
+                           dstRight, dstBottom);
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    ColorConversionFuzzer colorConversionFuzzer(data, size);
+    colorConversionFuzzer.process();
+    return 0;
+}
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 5a21755..38a4c1e 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -18,6 +18,8 @@
 #define A_CODEC_H_
 
 #include <stdint.h>
+#include <list>
+#include <vector>
 #include <android/native_window.h>
 #include <media/hardware/MetadataBufferType.h>
 #include <media/MediaCodecInfo.h>
@@ -265,11 +267,11 @@
     sp<AMessage> mBaseOutputFormat;
 
     FrameRenderTracker mRenderTracker; // render information for buffers rendered by ACodec
-    Vector<BufferInfo> mBuffers[2];
+    std::vector<BufferInfo> mBuffers[2];
     bool mPortEOS[2];
     status_t mInputEOSResult;
 
-    List<sp<AMessage> > mDeferredQueue;
+    std::list<sp<AMessage>> mDeferredQueue;
 
     sp<AMessage> mLastOutputFormat;
     bool mIsVideo;
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 7c3eca6..cf76606 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -196,7 +196,9 @@
     typedef key_value_pair_t< const char *, Vector<uint16_t> > ItemRefs;
     typedef struct _ItemInfo {
         bool isGrid() const { return !strcmp("grid", itemType); }
-        bool isImage() const { return !strcmp("hvc1", itemType) || isGrid(); }
+        bool isImage() const {
+            return !strcmp("hvc1", itemType) || !strcmp("av01", itemType) || isGrid();
+        }
         const char *itemType;
         uint16_t itemId;
         bool isPrimary;
@@ -224,10 +226,11 @@
         int32_t width;
         int32_t height;
         int32_t rotation;
-        sp<ABuffer> hvcc;
+        sp<ABuffer> data;
     } ItemProperty;
 
     bool mHasFileLevelMeta;
+    bool mIsAvif; // used to differentiate HEIC and AVIF under the same OUTPUT_FORMAT_HEIF
     uint64_t mFileLevelMetaDataSize;
     bool mHasMoovBox;
     uint32_t mPrimaryItemId;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 1d2d711..6f6a4e6 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -18,6 +18,7 @@
 
 #define MEDIA_CODEC_H_
 
+#include <list>
 #include <memory>
 #include <vector>
 
@@ -483,7 +484,7 @@
     // stop/flush/reset/release.
     Mutex mBufferLock;
 
-    List<size_t> mAvailPortBuffers[2];
+    std::list<size_t> mAvailPortBuffers[2];
     std::vector<BufferInfo> mPortBuffers[2];
 
     int32_t mDequeueInputTimeoutGeneration;
@@ -501,7 +502,7 @@
 
     sp<IDescrambler> mDescrambler;
 
-    List<sp<ABuffer> > mCSD;
+    std::list<sp<ABuffer> > mCSD;
 
     sp<AMessage> mActivityNotify;
 
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecList.h b/media/libstagefright/include/media/stagefright/MediaCodecList.h
index 3cf455c..56c6a45 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecList.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecList.h
@@ -80,11 +80,9 @@
             const char *mime,
             bool createEncoder,
             uint32_t flags,
-            sp<AMessage> format,
+            const sp<AMessage> &format,
             Vector<AString> *matchingCodecs);
 
-    static bool codecHandlesFormat(const char *mime, sp<MediaCodecInfo> info, sp<AMessage> format);
-
     static bool isSoftwareCodec(const AString &componentName);
 
 private:
@@ -115,6 +113,20 @@
 
     MediaCodecList(const MediaCodecList&) = delete;
     MediaCodecList& operator=(const MediaCodecList&) = delete;
+
+    static void findMatchingCodecs(
+            const char *mime,
+            bool createEncoder,
+            uint32_t flags,
+            const sp<AMessage> &format,
+            Vector<AString> *matchingCodecs,
+            bool checkProfile);
+
+    static bool codecHandlesFormat(
+            const char *mime,
+            const sp<MediaCodecInfo> &info,
+            const sp<AMessage> &format,
+            bool checkProfile);
 };
 
 }  // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index e97a65e..33aaf11 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -48,9 +48,13 @@
 // deleting the output file after stop.
 struct MediaMuxer : public MediaMuxerBase {
 public:
-    // Construct the muxer with the file descriptor. Note that the MediaMuxer
-    // will close this file at stop().
-    MediaMuxer(int fd, OutputFormat format);
+    /**
+     * Creates the muxer for a given output format.
+     * @param fd : file descriptor of the output file.
+     * @param format : output format of the muxer. e.g.: webm/mp4/ogg
+     * @return the muxer object, or nullptr on error.
+     */
+    static MediaMuxer* create(int fd, OutputFormat format);
 
     virtual ~MediaMuxer();
 
@@ -127,6 +131,11 @@
     sp<AMessage> getTrackFormat(size_t idx);
 
 private:
+    // Construct the muxer with the file descriptor. Note that the MediaMuxer
+    // will close this file at stop().
+    // This constructor is made private to ensure that MediaMuxer::create() is used instead.
+    MediaMuxer(int fd, OutputFormat format);
+
     const OutputFormat mFormat;
     sp<MediaWriter> mWriter;
     Vector< sp<MediaAdapter> > mTrackList;  // Each track has its MediaAdapter.
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index 9f20185..2b14811 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -31,6 +31,29 @@
           mMaxFileDurationLimitUs(0) {
     }
 
+    // Returns true if the file descriptor is opened using a mode
+    // which meets minimum writer/muxer requirements.
+    static bool isFdOpenModeValid(int fd) {
+        // check for invalid file descriptor.
+        int flags = fcntl(fd, F_GETFL);
+        if (flags == -1) {
+            ALOGE("Invalid File Status Flags and/or mode : %d", flags);
+            return false;
+        }
+        // fd must be in read-write mode or write-only mode.
+        if ((flags & (O_RDWR | O_WRONLY)) == 0) {
+            ALOGE("File must be writable");
+            return false;
+        }
+        // Verify fd is seekable
+        off64_t off = lseek64(fd, 0, SEEK_SET);
+        if (off < 0) {
+            ALOGE("File descriptor is not seekable");
+            return false;
+        }
+        return true;
+    }
+
     virtual status_t addSource(const sp<MediaSource> &source) = 0;
     virtual bool reachedEOS() = 0;
     virtual status_t start(MetaData *params = NULL) = 0;
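
For context (not part of the patch): a minimal, self-contained sketch of how a caller might pre-validate a file descriptor using the same fcntl/lseek checks that isFdOpenModeValid() performs above. The helper name isWritableAndSeekable and the file path are illustrative assumptions only.

// Hypothetical caller-side check mirroring MediaWriter::isFdOpenModeValid().
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

static bool isWritableAndSeekable(int fd) {
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1) {
        return false;                           // not a valid descriptor
    }
    if ((flags & (O_RDWR | O_WRONLY)) == 0) {
        return false;                           // opened read-only
    }
    return lseek(fd, 0, SEEK_SET) >= 0;         // pipes/sockets fail here
}

int main() {
    int fd = open("/tmp/out.mp4", O_CREAT | O_RDWR, 0644);
    std::printf("fd usable for muxing: %s\n",
                (fd >= 0 && isWritableAndSeekable(fd)) ? "yes" : "no");
    if (fd >= 0) close(fd);
    return 0;
}
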
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index cdf8d35..2ca0e33 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -267,6 +267,7 @@
     kKeyRtpExtMap        = 'extm', // int32_t, rtp extension ID for cvo on RTP protocol.
     kKeyRtpCvoDegrees    = 'cvod', // int32_t, rtp cvo degrees as per 3GPP 26.114.
     kKeyRtpDscp          = 'dscp', // int32_t, DSCP(Differentiated services codepoint) of RFC 2474.
+    kKeyRtpEcn           = 'sEcn', // int32_t, ECN (Explicit Congestion Notification) of RFC 3168
     kKeySocketNetwork    = 'sNet', // int64_t, socket will be bound to network handle.
 
     // Slow-motion markers
@@ -283,6 +284,12 @@
     // DVB audio description
     kKeyDvbAudioDescription = 'addt', // bool (int32_t), DVB audio description only defined for
                                       // audio component
+
+    // DVB teletext magazine number
+    kKeyDvbTeletextMagazineNumber = 'ttxm', // int32_t, DVB teletext magazine number
+
+    // DVB teletext page number
+    kKeyDvbTeletextPageNumber = 'ttxp', // int32_t, DVB teletext page number
 };
 
 enum {
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index ddf797c..88f7be7 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -332,6 +332,11 @@
 }
 
 bool AAVCAssembler::dropFramesUntilIframe(const sp<ABuffer> &buffer) {
+    if (buffer->size() == 0) {
+        ALOGE("b/230630526 buffer->size() == 0");
+        android_errorWriteLog(0x534e4554, "230630526");
+        return false;
+    }
     const uint8_t *data = buffer->data();
     unsigned nalType = data[0] & 0x1f;
     if (!mFirstIFrameProvided && nalType < 0x5) {
@@ -624,8 +629,7 @@
     int32_t firstSeqNo = buffer->int32Data();
 
     // This only works for FU-A type & non-start sequence
-    int32_t nalType = buffer->size() >= 1 ? buffer->data()[0] & 0x1f : -1;
-    if (nalType != 28 || (buffer->size() >= 2 && buffer->data()[1] & 0x80)) {
+    if (buffer->size() < 2 || (buffer->data()[0] & 0x1f) != 28 || buffer->data()[1] & 0x80) {
         return firstSeqNo;
     }
 
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index bb42d1f..72dd981 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -629,13 +629,13 @@
 
 int32_t AHEVCAssembler::pickStartSeq(const Queue *queue,
         uint32_t first, int64_t play, int64_t jit) {
+    CHECK(!queue->empty());
     // pick the first sequence number has the start bit.
     sp<ABuffer> buffer = *(queue->begin());
     int32_t firstSeqNo = buffer->int32Data();
 
     // This only works for FU-A type & non-start sequence
-    unsigned nalType = buffer->data()[0] & 0x1f;
-    if (nalType != 28 || buffer->data()[2] & 0x80) {
+    if (buffer->size() < 3 || (buffer->data()[0] & 0x1f) != 28 || buffer->data()[2] & 0x80) {
         return firstSeqNo;
     }
 
@@ -645,7 +645,7 @@
         if (rtpTime + jit >= play) {
             break;
         }
-        if ((data[2] & 0x80)) {
+        if (it->size() >= 3 && (data[2] & 0x80)) {
             const int32_t seqNo = it->int32Data();
             ALOGE("finding [HEAD] pkt. \t Seq# (%d ~ )[%d", firstSeqNo, seqNo);
             firstSeqNo = seqNo;
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index a61f48f..165c336 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -16,6 +16,12 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ARTPConnection"
+#define INET_ECN_NOT_ECT    0x00    /* ECN was not enabled */
+#define INET_ECN_ECT_1      0x01    /* ECN capable packet */
+#define INET_ECN_ECT_0      0x02    /* ECN capable packet */
+#define INET_ECN_CE         0x03    /* ECN congestion */
+#define INET_ECN_MASK       0x03    /* Mask of ECN bits */
+
 #include <utils/Log.h>
 
 #include <media/stagefright/rtsp/ARTPAssembler.h>
@@ -56,6 +62,7 @@
 
 // static
 const int64_t ARTPConnection::kSelectTimeoutUs = 1000LL;
+const int64_t ARTPConnection::kMinOneSecondNotifyDelayUs = 100000ll;
 
 struct ARTPConnection::StreamInfo {
     bool isIPv6;
@@ -84,7 +91,10 @@
       mPollEventPending(false),
       mLastReceiverReportTimeUs(-1),
       mLastBitrateReportTimeUs(-1),
+      mLastCongestionNotifyTimeUs(-1),
       mTargetBitrate(-1),
+      mRtpSockOptEcn(0),
+      mIsIPv6(false),
       mStaticJitterTimeMs(kStaticJitterTimeMs) {
 }
 
@@ -175,7 +185,7 @@
 // static
 void ARTPConnection::MakeRTPSocketPair(
         int *rtpSocket, int *rtcpSocket, const char *localIp, const char *remoteIp,
-        unsigned localPort, unsigned remotePort, int64_t socketNetwork) {
+        unsigned localPort, unsigned remotePort, int64_t socketNetwork, int32_t sockOptEcn) {
     bool isIPv6 = false;
     if (strchr(localIp, ':') != NULL)
         isIPv6 = true;
@@ -204,6 +214,24 @@
         }
     }
 
+    if (sockOptEcn != 0) {
+        int sockOptForTOS = 1;
+        if (setsockopt(*rtpSocket, isIPv6 ? IPPROTO_IPV6 : IPPROTO_IP,
+               isIPv6 ? IPV6_RECVTCLASS : IP_RECVTOS,
+               (int *)&sockOptForTOS, sizeof(sockOptForTOS)) < 0) {
+            ALOGE("failed to set recv sockopt TOS on rtpsock(%d). err=%s", *rtpSocket,
+                strerror(errno));
+        } else {
+            ALOGD("successfully set recv sockopt TOS on rtpsock(%d)", *rtpSocket);
+            int result = setsockopt(*rtcpSocket, isIPv6 ? IPPROTO_IPV6 : IPPROTO_IP,
+                isIPv6 ? IPV6_RECVTCLASS : IP_RECVTOS,
+                (int *)&sockOptForTOS, sizeof(sockOptForTOS));
+            if (result >= 0) {
+                ALOGD("successfully set recv sockopt TOS on rtcpsock(%d).", *rtcpSocket);
+            }
+        }
+    }
+
     bumpSocketBufferSize(*rtcpSocket);
 
     struct sockaddr *addr;
@@ -593,32 +621,25 @@
 
     sp<ABuffer> buffer = new ABuffer(65536);
 
-    struct sockaddr *pRemoteRTCPAddr;
-    int sizeSockSt;
-    if (s->isIPv6) {
-        pRemoteRTCPAddr = (struct sockaddr *)&s->mRemoteRTCPAddr6;
-        sizeSockSt = sizeof(struct sockaddr_in6);
-    } else {
-        pRemoteRTCPAddr = (struct sockaddr *)&s->mRemoteRTCPAddr;
-        sizeSockSt = sizeof(struct sockaddr_in);
-    }
-    socklen_t remoteAddrLen =
-        (!receiveRTP && s->mNumRTCPPacketsReceived == 0)
-            ? sizeSockSt : 0;
+    struct msghdr sMsg = {};
+    struct iovec sIov[1] = {};
 
-    if (mFlags & kViLTEConnection) {
-        remoteAddrLen = 0;
-    }
+    sIov[0].iov_base = (char *) buffer->data();
+    sIov[0].iov_len = buffer->capacity();
+
+    sMsg.msg_iov = sIov;
+    sMsg.msg_iovlen = 1;
+
+    int cMsgSize = sizeof(struct cmsghdr) + sizeof(uint8_t);
+    char buf[CMSG_SPACE(cMsgSize)];
+    sMsg.msg_control = buf;
+    sMsg.msg_controllen = sizeof(buf);
+    sMsg.msg_flags = 0;
 
     ssize_t nbytes;
     do {
-        nbytes = recvfrom(
-            receiveRTP ? s->mRTPSocket : s->mRTCPSocket,
-            buffer->data(),
-            buffer->capacity(),
-            0,
-            remoteAddrLen > 0 ? pRemoteRTCPAddr : NULL,
-            remoteAddrLen > 0 ? &remoteAddrLen : NULL);
+        // Use recvmsg() so the TOS byte of the incoming packet is also delivered
+        nbytes = recvmsg(receiveRTP ? s->mRTPSocket : s->mRTCPSocket, &sMsg, 0);
         mCumulativeBytes += nbytes;
     } while (nbytes < 0 && errno == EINTR);
 
@@ -633,6 +654,10 @@
         }
     }
 
+    if (nbytes > 0) {
+        handleIpHeadersIfReceived(s, sMsg);
+    }
+
     buffer->setRange(0, nbytes);
 
     // ALOGI("received %d bytes.", buffer->size());
@@ -647,13 +672,68 @@
     return err;
 }
 
+/* Checks whether a TOS byte was delivered with the received IP packet.
+ * If one is present and its CE bit is set, congestion is reported to the
+ * upper layer.
+ */
+void ARTPConnection::handleIpHeadersIfReceived(StreamInfo *s, struct msghdr sMsg) {
+    struct cmsghdr *cMsg;
+    cMsg = CMSG_FIRSTHDR(&sMsg);
+
+    if (cMsg == NULL) {
+        ALOGV("cmsg is null");
+    }
+
+    for (; cMsg != NULL; cMsg = CMSG_NXTHDR(&sMsg, cMsg)) {
+        bool isTOSHeader = ((cMsg->cmsg_level == (mIsIPv6 ? IPPROTO_IPV6 : IPPROTO_IP))
+                              && (cMsg->cmsg_type == (mIsIPv6 ? IPV6_TCLASS : IP_TOS))
+                              && (cMsg->cmsg_len));
+        if (isTOSHeader) {
+            uint8_t receivedTOS;
+            receivedTOS = *((uint8_t *) CMSG_DATA(cMsg));
+            // checking CE bit is set
+            bool isCEBitMarked = ((receivedTOS & INET_ECN_MASK) == INET_ECN_CE);
+
+            ALOGV("receivedTos(value -> %d)", receivedTOS);
+
+            if (isCEBitMarked) {
+                ALOGD("receivedTos(value -> %d), is ECN CE marked = %d",
+                    receivedTOS, isCEBitMarked);
+                notifyCongestionToUpperLayerIfNeeded(s);
+            }
+            break;
+        }
+    }
+}
+
+/* This function is used to notify the upper layer of congestion during a video call. */
+void ARTPConnection::notifyCongestionToUpperLayerIfNeeded(StreamInfo *s) {
+    int64_t nowUs = ALooper::GetNowUs();
+
+    if (mLastCongestionNotifyTimeUs <= 0) {
+        mLastCongestionNotifyTimeUs = nowUs;
+    }
+
+    bool isNeedToUpdate = (mLastCongestionNotifyTimeUs + kMinOneSecondNotifyDelayUs <= nowUs);
+    ALOGD("ECN info set by upper layer=%d, isNeedToUpdate=%d", mRtpSockOptEcn, isNeedToUpdate);
+
+    if ((mRtpSockOptEcn != 0) && (isNeedToUpdate)) {
+        sp<AMessage> notify = s->mNotifyMsg->dup();
+        notify->setInt32("rtcp-event", 1);
+        notify->setInt32("payload-type", ARTPSource::RTP_QUALITY_CD);
+        notify->post();
+        mLastCongestionNotifyTimeUs = nowUs;
+        ALOGD("Congestion detected in n/w, Notify upper layer");
+    }
+}
+
 ssize_t ARTPConnection::send(const StreamInfo *info, const sp<ABuffer> buffer) {
         struct sockaddr* pRemoteRTCPAddr;
         int sizeSockSt;
 
         /* It seems this isIPv6 variable is useless.
          * We should remove it to prevent confusion */
-        if (info->isIPv6) {
+        if (mIsIPv6) {
             pRemoteRTCPAddr = (struct sockaddr *)&info->mRemoteRTCPAddr6;
             sizeSockSt = sizeof(struct sockaddr_in6);
         } else {
@@ -1215,12 +1295,20 @@
     mTargetBitrate = targetBitrate;
 }
 
+void ARTPConnection::setRtpSockOptEcn(int32_t sockOptEcn) {
+    mRtpSockOptEcn = sockOptEcn;
+}
+
+void ARTPConnection::setIsIPv6(const char *localIp) {
+    mIsIPv6 = (strchr(localIp, ':') != nullptr);
+}
+
 void ARTPConnection::checkRxBitrate(int64_t nowUs) {
     if (mLastBitrateReportTimeUs <= 0) {
         mCumulativeBytes = 0;
         mLastBitrateReportTimeUs = nowUs;
     }
-    else if (mLastEarlyNotifyTimeUs + 100000ll <= nowUs) {
+    else if (mLastEarlyNotifyTimeUs + kMinOneSecondNotifyDelayUs <= nowUs) {
         int32_t timeDiff = (nowUs - mLastBitrateReportTimeUs) / 1000000ll;
         int32_t bitrate = mCumulativeBytes * 8 / timeDiff;
         mLastEarlyNotifyTimeUs = nowUs;
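
As a side note (not part of the patch): a minimal sketch of the ECN decoding that handleIpHeadersIfReceived() applies to the received TOS byte. The constants mirror the INET_ECN_* defines added at the top of this file; the sample DSCP/ECN values are made up for illustration.

#include <cstdint>
#include <cstdio>

// The ECN field is the two least-significant bits of the IPv4 TOS /
// IPv6 traffic-class byte (same layout as the INET_ECN_* defines above).
constexpr uint8_t kEcnMask = 0x03;
constexpr uint8_t kEcnCe   = 0x03;   // Congestion Experienced

static bool isCongestionExperienced(uint8_t tos) {
    return (tos & kEcnMask) == kEcnCe;
}

int main() {
    uint8_t ect0     = (46 << 2) | 0x02;  // DSCP 46 with ECT(0)
    uint8_t ceMarked = (46 << 2) | 0x03;  // same DSCP after a router marks CE
    std::printf("ECT(0) -> CE=%d, CE-marked -> CE=%d\n",
                isCongestionExperienced(ect0), isCongestionExperienced(ceMarked));
    return 0;
}
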
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 8990f0c..41f2d67 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -255,9 +255,34 @@
     if (params->findInt32(kKeyRtpCvoDegrees, &rtpCVODegrees))
         mRTPCVODegrees = rtpCVODegrees;
 
+    bool needToSetSockOpt = false;
     int32_t dscp = 0;
-    if (params->findInt32(kKeyRtpDscp, &dscp))
-        updateSocketDscp(dscp);
+    if (params->findInt32(kKeyRtpDscp, &dscp)) {
+        mRtpLayer3Dscp = dscp << 2;
+        needToSetSockOpt = true;
+    }
+
+    int32_t ecn = 0;
+    if (params->findInt32(kKeyRtpEcn, &ecn)) {
+        /*
+         * @ecn, possible values for the ECN field:
+         *  +-----+-----+
+         *  | ECN FIELD |
+         *  +-----+-----+
+         *    ECT   CE         [Obsolete] RFC 2481 names for the ECN bits.
+         *     0     0         Not-ECT
+         *     0     1         ECT (ECN-Capable Transport) (1)
+         *     1     0         ECT (ECN-Capable Transport) (0)
+         *     1     1         CE (Congestion Experienced)
+         *
+         */
+        mRtpSockOptEcn = ecn;
+        needToSetSockOpt = true;
+    }
+
+    if (needToSetSockOpt) {
+        updateSocketOpt();
+    }
 
     int64_t sockNetwork = 0;
     if (params->findInt64(kKeySocketNetwork, &sockNetwork))
@@ -1438,18 +1463,29 @@
     mPayloadType = payloadType;
 }
 
-void ARTPWriter::updateSocketDscp(int32_t dscp) {
-    mRtpLayer3Dscp = dscp << 2;
+/*
+ * Sets the socket option that controls the TOS/traffic-class byte in the IP header.
+ */
+void ARTPWriter::updateSocketOpt() {
+    /*
+     * 0     1     2     3     4     5     6     7
+     * +-----+-----+-----+-----+-----+-----+-----+-----+
+     * |          DS FIELD, DSCP           | ECN FIELD |
+     * +-----+-----+-----+-----+-----+-----+-----+-----+
+     */
+    int sockOpt = mRtpLayer3Dscp ^ mRtpSockOptEcn;
+    ALOGD("Update socket opt with sockopt=%d, mRtpLayer3Dscp=%d, mRtpSockOptEcn=%d",
+                sockOpt, mRtpLayer3Dscp, mRtpSockOptEcn);
 
-    /* mRtpLayer3Dscp will be mapped to WMM(Wifi) as per operator's requirement */
-    if (setsockopt(mRTPSocket, IPPROTO_IP, IP_TOS,
-                (int *)&mRtpLayer3Dscp, sizeof(mRtpLayer3Dscp)) < 0) {
-        ALOGE("failed to set dscp on rtpsock. err=%s", strerror(errno));
+    /* sockOpt will be used to set socket option in IP header */
+    if (setsockopt(mRTPSocket, mIsIPv6 ? IPPROTO_IPV6 : IPPROTO_IP, mIsIPv6 ? IPV6_TCLASS : IP_TOS,
+                (int *)&sockOpt, sizeof(sockOpt)) < 0) {
+        ALOGE("failed to set sockopt on rtpsock. err=%s", strerror(errno));
     } else {
-        ALOGD("successfully set dscp on rtpsock. opt=%d", mRtpLayer3Dscp);
-        setsockopt(mRTCPSocket, IPPROTO_IP, IP_TOS,
-                (int *)&mRtpLayer3Dscp, sizeof(mRtpLayer3Dscp));
-        ALOGD("successfully set dscp on rtcpsock. opt=%d", mRtpLayer3Dscp);
+        ALOGD("successfully set sockopt. opt=%d", sockOpt);
+        setsockopt(mRTCPSocket, mIsIPv6 ? IPPROTO_IPV6 : IPPROTO_IP, mIsIPv6 ? IPV6_TCLASS : IP_TOS,
+                (int *)&sockOpt, sizeof(sockOpt));
+        ALOGD("successfully set sockopt rtcpsock. opt=%d", sockOpt);
     }
 }
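
For orientation (not part of the patch): mRtpLayer3Dscp already holds the DSCP value shifted left by two bits, so it and the 2-bit ECN value occupy disjoint bits and the XOR in updateSocketOpt() yields the same byte as a bitwise OR. A small sketch with example values:

#include <cstdint>
#include <cstdio>

int main() {
    int32_t dscp = 46;                    // e.g. Expedited Forwarding
    int32_t layer3Dscp = dscp << 2;       // as stored in mRtpLayer3Dscp
    int32_t ecn = 0x01;                   // e.g. ECT(1)

    int32_t tosXor = layer3Dscp ^ ecn;    // what updateSocketOpt() computes
    int32_t tosOr  = layer3Dscp | ecn;    // identical here: the bit fields don't overlap

    std::printf("TOS via XOR: 0x%02x, via OR: 0x%02x\n", tosXor, tosOr);
    return 0;
}
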
 
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
index 73d2866..250de71 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
@@ -20,6 +20,7 @@
 
 #include <media/stagefright/foundation/AHandler.h>
 #include <utils/List.h>
+#include <sys/socket.h>
 
 namespace android {
 
@@ -48,6 +49,8 @@
     void setSelfID(const uint32_t selfID);
     void setStaticJitterTimeMs(const uint32_t jbTimeMs);
     void setTargetBitrate(int32_t targetBitrate);
+    void setRtpSockOptEcn(int32_t sockOptEcn);
+    void setIsIPv6(const char *localIp);
 
     // Creates a pair of UDP datagram sockets bound to adjacent ports
     // (the rtpSocket is bound to an even port, the rtcpSocket to the
@@ -60,7 +63,8 @@
     static void MakeRTPSocketPair(
             int *rtpSocket, int *rtcpSocket,
             const char *localIp, const char *remoteIp,
-            unsigned localPort, unsigned remotePort, int64_t socketNetwork = 0);
+            unsigned localPort, unsigned remotePort, int64_t socketNetwork = 0,
+            int32_t sockOptEcn = 0);
 
 protected:
     virtual ~ARTPConnection();
@@ -77,6 +81,7 @@
     };
 
     static const int64_t kSelectTimeoutUs;
+    static const int64_t kMinOneSecondNotifyDelayUs;
 
     uint32_t mFlags;
 
@@ -87,9 +92,12 @@
     int64_t mLastReceiverReportTimeUs;
     int64_t mLastBitrateReportTimeUs;
     int64_t mLastEarlyNotifyTimeUs;
+    int64_t mLastCongestionNotifyTimeUs;
 
     int32_t mSelfID;
     int32_t mTargetBitrate;
+    int32_t mRtpSockOptEcn;
+    bool mIsIPv6;
 
     uint32_t mStaticJitterTimeMs;
 
@@ -103,6 +111,8 @@
     void onInjectPacket(const sp<AMessage> &msg);
     void onSendReceiverReports();
     void checkRxBitrate(int64_t nowUs);
+    void notifyCongestionToUpperLayerIfNeeded(StreamInfo *s);
+    void handleIpHeadersIfReceived(StreamInfo *s, struct msghdr sMsg);
 
     status_t receive(StreamInfo *info, bool receiveRTP);
     ssize_t send(const StreamInfo *info, const sp<ABuffer> buffer);
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
index e9b4942..3fa5713 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
@@ -50,6 +50,7 @@
         RTCP_FIRST_PACKET = 101,
         RTP_QUALITY = 102,
         RTP_QUALITY_EMC = 103,
+        RTP_QUALITY_CD = 104,
         RTCP_SR = 200,
         RTCP_RR = 201,
         RTCP_TSFB = 205,
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
index 2982cf6..ecd29d0 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
@@ -50,7 +50,7 @@
     virtual status_t pause();
     void updateCVODegrees(int32_t cvoDegrees);
     void updatePayloadType(int32_t payloadType);
-    void updateSocketDscp(int32_t dscp);
+    void updateSocketOpt();
     void updateSocketNetwork(int64_t socketNetwork);
     uint32_t getSequenceNum();
     virtual uint64_t getAccumulativeBytes() override;
@@ -98,6 +98,7 @@
     struct sockaddr_in6 mRTPAddr6;
     struct sockaddr_in6 mRTCPAddr6;
     int32_t mRtpLayer3Dscp;
+    int32_t mRtpSockOptEcn;
     net_handle_t mRTPSockNetwork;
 
     AString mProfileLevel;
diff --git a/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp b/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp
index 5df3267..70d73c8 100644
--- a/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp
+++ b/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp
@@ -52,7 +52,10 @@
 
   MediaMuxer::OutputFormat format =
       (MediaMuxer::OutputFormat)fdp.ConsumeIntegralInRange<int32_t>(0, 4);
-  sp<MediaMuxer> mMuxer(new MediaMuxer(fd, format));
+  sp<MediaMuxer> mMuxer = MediaMuxer::create(fd, format);
+  if (mMuxer == nullptr) {
+    return 0;
+  }
 
   while (fdp.remaining_bytes() > 1) {
     switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 4)) {
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 5eaadbd..3823c36 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -54,6 +54,19 @@
 
 static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
 
+bool WebmWriter::isFdOpenModeValid(int fd) {
+    // check for invalid file descriptor.
+    if (!MediaWriter::isFdOpenModeValid(fd)) {
+        return false;
+    }
+    int flags = fcntl(fd, F_GETFL);
+    if ((flags & O_RDWR) == 0) {
+        ALOGE("File must be in read-write mode for webm writer");
+        return false;
+    }
+    return true;
+}
+
 WebmWriter::WebmWriter(int fd)
     : mFd(dup(fd)),
       mInitCheck(mFd < 0 ? NO_INIT : OK),
diff --git a/media/libstagefright/webm/include/webm/WebmWriter.h b/media/libstagefright/webm/include/webm/WebmWriter.h
index ed5bc4c..e339add 100644
--- a/media/libstagefright/webm/include/webm/WebmWriter.h
+++ b/media/libstagefright/webm/include/webm/WebmWriter.h
@@ -36,6 +36,10 @@
 
 class WebmWriter : public MediaWriter {
 public:
+    // Returns true if the file descriptor is opened using a mode
+    // which is compatible with WebmWriter.
+    // Note that this hides (does not override) the method of the same name in the base class.
+    static bool isFdOpenModeValid(int fd);
     explicit WebmWriter(int fd);
     ~WebmWriter() { reset(); }
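
For context (not part of the patch): because isFdOpenModeValid() is static, the WebmWriter version hides rather than overrides the MediaWriter one, so the check that runs is chosen by the class name at the call site. A sketch with stand-in types (not the real classes):

#include <cstdio>

// Stand-ins for MediaWriter / WebmWriter; only the static-name lookup matters here.
struct BaseWriter {
    static bool isFdOpenModeValid(int /*fd*/) { return true; }   // base check
};
struct WebmLikeWriter : BaseWriter {
    static bool isFdOpenModeValid(int fd) {
        // Reuse the base check, then apply the stricter (read-write only) rule.
        return BaseWriter::isFdOpenModeValid(fd) /* && extra O_RDWR check */;
    }
};

int main() {
    // No virtual dispatch: the qualified name chosen at the call site decides.
    std::printf("base=%d webm=%d\n",
                BaseWriter::isFdOpenModeValid(0), WebmLikeWriter::isFdOpenModeValid(0));
    return 0;
}
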
 
diff --git a/media/module/extractors/fuzzers/Android.bp b/media/module/extractors/fuzzers/Android.bp
index 490e195..b3e34d2 100644
--- a/media/module/extractors/fuzzers/Android.bp
+++ b/media/module/extractors/fuzzers/Android.bp
@@ -173,7 +173,7 @@
     ],
 
     static_libs: [
-        "libwebm",
+        "libwebm_mkvparser",
         "libstagefright_flacdec",
         "libstagefright_metadatautils",
         "libmkvextractor",
diff --git a/media/module/extractors/mkv/Android.bp b/media/module/extractors/mkv/Android.bp
index 98ce305..c4b67eb 100644
--- a/media/module/extractors/mkv/Android.bp
+++ b/media/module/extractors/mkv/Android.bp
@@ -33,7 +33,7 @@
         "libstagefright_foundation_colorutils_ndk",   // for mainline-safe ColorUtils
         "libstagefright_foundation",
         "libstagefright_metadatautils",
-        "libwebm",
+        "libwebm_mkvparser",
         "libutils",
     ],
 
diff --git a/media/module/extractors/tests/Android.bp b/media/module/extractors/tests/Android.bp
index 3c3bbdc..d6e79c7 100644
--- a/media/module/extractors/tests/Android.bp
+++ b/media/module/extractors/tests/Android.bp
@@ -55,7 +55,7 @@
         "libmedia_midiiowrapper",
         "libsonivoxwithoutjet",
         "libvorbisidec",
-        "libwebm",
+        "libwebm_mkvparser",
         "libFLAC",
     ],
 
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index ddc71db..fded4f5 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -52,6 +52,9 @@
     symbol_file: "libmediandk.map.txt",
     first_version: "21",
     unversioned_until: "current",
+    export_header_libs: [
+        "libmediandk_headers",
+    ],
 }
 
 ndk_headers {
@@ -167,7 +170,7 @@
     stubs: {
         symbol_file: "libmediandk.map.txt",
         versions: ["29"],
-    }
+    },
 }
 
 cc_library {
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 0bdb41b..c2093ac 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -264,6 +264,13 @@
         case HAL_PIXEL_FORMAT_YCrCb_420_SP:
             *pixelStride = (planeIdx == 0) ? 1 : 2;
             return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
+            if (mLockedBuffer->dataCb && mLockedBuffer->dataCr) {
+                *pixelStride = (planeIdx == 0) ? 2 : mLockedBuffer->chromaStep;
+            } else {
+                *pixelStride = (planeIdx == 0) ? 2 : 4;
+            }
+            return AMEDIA_OK;
         case HAL_PIXEL_FORMAT_Y8:
             *pixelStride = 1;
             return AMEDIA_OK;
@@ -333,6 +340,13 @@
             *rowStride = (planeIdx == 0) ? mLockedBuffer->stride
                                          : ALIGN(mLockedBuffer->stride / 2, 16);
             return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
+            if (mLockedBuffer->dataCb && mLockedBuffer->dataCr) {
+                *rowStride = (planeIdx == 0) ?  mLockedBuffer->stride : mLockedBuffer->chromaStride;
+            } else {
+                *rowStride = mLockedBuffer->stride * 2;
+            }
+            return AMEDIA_OK;
         case HAL_PIXEL_FORMAT_RAW10:
         case HAL_PIXEL_FORMAT_RAW12:
             // RAW10 and RAW12 are used for 10-bit and 12-bit raw data, they are single plane
@@ -490,6 +504,47 @@
                                     : (planeIdx == 1) ? cb : cr;
             dataSize = (planeIdx == 0) ? ySize : cSize;
             break;
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
+            if (mLockedBuffer->height % 2 != 0) {
+                ALOGE("YCBCR_P010: height (%d) should be a multiple of 2", mLockedBuffer->height);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+
+            if (mLockedBuffer->width <= 0) {
+                ALOGE("YCBCR_P010: width (%d) should be > 0", mLockedBuffer->width);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+
+            if (mLockedBuffer->height <= 0) {
+                ALOGE("YCBCR_P010: height (%d) should be > 0", mLockedBuffer->height);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+
+            if (mLockedBuffer->dataCb && mLockedBuffer->dataCr) {
+                pData = (planeIdx == 0) ?  mLockedBuffer->data :
+                        (planeIdx == 1) ?  mLockedBuffer->dataCb : mLockedBuffer->dataCr;
+                // only map until last pixel
+                if (planeIdx == 0) {
+                    cStride = mLockedBuffer->stride;
+                    dataSize = cStride * (mLockedBuffer->height - 1) + mLockedBuffer->width * 2;
+                } else {
+                    bytesPerPixel = mLockedBuffer->chromaStep;
+                    cStride = mLockedBuffer->chromaStride;
+                    dataSize = cStride * (mLockedBuffer->height / 2 - 1) +
+                            bytesPerPixel * (mLockedBuffer->width / 2);
+                }
+                break;
+            }
+
+            cStride = mLockedBuffer->stride * 2;
+            ySize = cStride * mLockedBuffer->height;
+            cSize = ySize / 2;
+            cb = mLockedBuffer->data + ySize;
+            cr = cb + 2;
+
+            pData = (planeIdx == 0) ?  mLockedBuffer->data : (planeIdx == 1) ?  cb : cr;
+            dataSize = (planeIdx == 0) ? ySize : cSize;
+            break;
         case HAL_PIXEL_FORMAT_Y8:
             // Single plane, 8bpp.
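
For orientation (not part of the patch): a sketch of the packed P010 layout assumed by the fallback branch above (no separate dataCb/dataCr pointers): 16 bits per luma sample plus an interleaved CbCr plane at half vertical resolution, which is where the stride * 2 row stride and the 4-byte chroma pixel stride come from. Dimensions are arbitrary examples.

#include <cstdint>
#include <cstdio>

int main() {
    int32_t width = 1920, height = 1080;
    int32_t stridePixels = width;               // buffer stride, in pixels

    int32_t yRowStride      = stridePixels * 2; // 10-bit luma stored in 16 bits
    int32_t ySize           = yRowStride * height;
    int32_t cbcrSize        = ySize / 2;        // interleaved CbCr, 4:2:0 subsampling
    int32_t cbcrPixelStride = 4;                // one Cb + one Cr sample, 2 bytes each

    std::printf("Y: %d bytes, CbCr: %d bytes, chroma pixel stride: %d bytes\n",
                ySize, cbcrSize, cbcrPixelStride);
    return 0;
}
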
 
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index ac5cba8..067c8f4 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -76,6 +76,7 @@
         case AIMAGE_FORMAT_HEIC:
         case AIMAGE_FORMAT_DEPTH_JPEG:
         case AIMAGE_FORMAT_RAW_DEPTH10:
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
             return true;
         case AIMAGE_FORMAT_PRIVATE:
             // For private format, cpu usage is prohibited.
@@ -89,6 +90,7 @@
 AImageReader::getNumPlanesForFormat(int32_t format) {
     switch (format) {
         case AIMAGE_FORMAT_YUV_420_888:
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
             return 3;
         case AIMAGE_FORMAT_RGBA_8888:
         case AIMAGE_FORMAT_RGBX_8888:
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index ed31c02..b230df5 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -64,6 +64,10 @@
 
         if (untranslated.find(err) == untranslated.end()) {
             ALOGE("untranslated sf error code: %d", err);
+            char err_as_string[32];
+            snprintf(err_as_string, sizeof(err_as_string), "%d", err);
+            android_errorWriteWithInfoLog(0x534e4554, "224869524", -1,
+                                          err_as_string, strlen(err_as_string));
             untranslated.insert(err);
         }
     }
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index 1965e62..9d62884 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -46,7 +46,7 @@
     if (mData == nullptr) {
         return nullptr;
     }
-    mData->mImpl = new (std::nothrow) MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+    mData->mImpl = MediaMuxer::create(fd, (MediaMuxer::OutputFormat)format);
     if (mData->mImpl == nullptr) {
         delete mData;
         return nullptr;
diff --git a/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.cpp b/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.cpp
index c7ce950..fa81cd8 100644
--- a/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.cpp
+++ b/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.cpp
@@ -264,11 +264,11 @@
     }
 }
 
-AMediaFormat* NdkMediaCodecFuzzerBase::getSampleCodecFormat() {
-    AMediaFormat* format = AMediaFormat_new();
+void NdkMediaCodecFuzzerBase::setCodecFormat() {
     std::string value;
     int32_t count = 0;
     int32_t maxFormatKeys = 0;
+    AMediaFormat_clear(mFormat);
 
     /*set mimeType*/
     if (mFdp->ConsumeBool()) {
@@ -277,37 +277,36 @@
         value = mFdp->PickValueInArray(kMimeTypes);
     }
     if (mFdp->ConsumeBool()) {
-        AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, value.c_str());
+        AMediaFormat_setString(mFormat, AMEDIAFORMAT_KEY_MIME, value.c_str());
     }
 
     maxFormatKeys = mFdp->ConsumeIntegralInRange<int32_t>(0, std::size(kFormatStringKeys));
     for (count = 0; count < maxFormatKeys; ++count) {
         std::string formatKey = mFdp->PickValueInArray(kFormatStringKeys);
-        formatSetString(format, formatKey.c_str(), mFdp);
+        formatSetString(mFormat, formatKey.c_str(), mFdp);
     }
 
     maxFormatKeys = mFdp->ConsumeIntegralInRange<int32_t>(0, std::size(kFormatIntKeys));
     for (count = 0; count < maxFormatKeys; ++count) {
         std::string formatKey = mFdp->PickValueInArray(kFormatIntKeys);
-        formatSetInt(format, formatKey.c_str(), mFdp);
+        formatSetInt(mFormat, formatKey.c_str(), mFdp);
     }
 
     maxFormatKeys = mFdp->ConsumeIntegralInRange<int32_t>(0, std::size(kFormatFloatKeys));
     for (count = 0; count < maxFormatKeys; ++count) {
         std::string formatKey = mFdp->PickValueInArray(kFormatFloatKeys);
-        formatSetFloat(format, formatKey.c_str(), mFdp);
+        formatSetFloat(mFormat, formatKey.c_str(), mFdp);
     }
 
     maxFormatKeys = mFdp->ConsumeIntegralInRange<int32_t>(0, std::size(kFormatBufferKeys));
     for (count = 0; count < maxFormatKeys; ++count) {
         std::string formatKey = mFdp->PickValueInArray(kFormatBufferKeys);
-        formatSetBuffer(format, formatKey.c_str(), mFdp);
+        formatSetBuffer(mFormat, formatKey.c_str(), mFdp);
     }
-    return format;
 }
 
 AMediaCodec* NdkMediaCodecFuzzerBase::createCodec(bool isEncoder, bool isCodecForClient) {
-    mFormat = getSampleCodecFormat();
+    setCodecFormat();
     return (mFdp->ConsumeBool() ? createAMediaCodecByname(isEncoder, isCodecForClient)
                                 : createAMediaCodecByType(isEncoder, isCodecForClient));
 }
diff --git a/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.h b/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.h
index e810e55..2875f9f 100644
--- a/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.h
+++ b/media/ndk/fuzzer/NdkMediaCodecFuzzerBase.h
@@ -39,6 +39,7 @@
 
 class NdkMediaCodecFuzzerBase {
   public:
+    NdkMediaCodecFuzzerBase() { mFormat = AMediaFormat_new(); }
     void invokeCodecFormatAPI(AMediaCodec* codec);
     void invokeInputBufferOperationAPI(AMediaCodec* codec);
     void invokeOutputBufferOperationAPI(AMediaCodec* codec);
@@ -57,7 +58,7 @@
     AMediaCodec* createAMediaCodecByType(bool isEncoder, bool isCodecForClient);
     AMediaFormat* getSampleAudioFormat();
     AMediaFormat* getSampleVideoFormat();
-    AMediaFormat* getSampleCodecFormat();
+    void setCodecFormat();
     AMediaFormat* mFormat = nullptr;
     FuzzedDataProvider* mFdp = nullptr;
 };
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java
index 21ba957..7fd2752 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java
@@ -60,14 +60,12 @@
     private static final String mStatsFile =
             mContext.getExternalFilesDir(null) + "/Muxer." + System.currentTimeMillis() + ".csv";
     private static final String TAG = "MuxerTest";
-    private static final Map<String, Integer> mMapFormat = new Hashtable<String, Integer>() {
-        {
-            put("mp4", MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
-            put("webm", MediaMuxer.OutputFormat.MUXER_OUTPUT_WEBM);
-            put("3gpp", MediaMuxer.OutputFormat.MUXER_OUTPUT_3GPP);
-            put("ogg", MediaMuxer.OutputFormat.MUXER_OUTPUT_OGG);
-        }
-    };
+    private static final Map<String, Integer> mMapFormat = Map.of(
+            "mp4", MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4,
+            "webm", MediaMuxer.OutputFormat.MUXER_OUTPUT_WEBM,
+            "3gpp", MediaMuxer.OutputFormat.MUXER_OUTPUT_3GPP,
+            "ogg", MediaMuxer.OutputFormat.MUXER_OUTPUT_OGG);
+
     private String mInputFileName;
     private String mFormat;
 
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp
index 043bc9e..6ea954c 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp
@@ -79,7 +79,6 @@
         vector<AMediaCodecBufferInfo> frameInfo;
         AMediaCodecBufferInfo info;
         uint32_t inputBufferOffset = 0;
-
         // Get frame data
         while (1) {
             status = extractor->getFrameSample(info);
@@ -110,7 +109,7 @@
         const char *statsFile = env->GetStringUTFChars(jStatsFile, nullptr);
         string sInputReference = string(inputReference);
         decoder->dumpStatistics(sInputReference, sCodecName, (asyncMode ? "async" : "sync"),
-                                statsFile);
+                                (statsFile == nullptr ? "" : statsFile));
         env->ReleaseStringUTFChars(jCodecName, codecName);
         env->ReleaseStringUTFChars(jStatsFile, statsFile);
         env->ReleaseStringUTFChars(jFileName, inputReference);
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
index 66fee33..c2ed82c 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
@@ -49,7 +49,7 @@
     private FileOutputStream mOutputStream;
 
     public Decoder() { mStats = new Stats(); }
-
+    public Stats getStats() { return mStats; };
     /**
      * Setup of decoder
      *
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java
index 7245a3a..0ebf798 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java
@@ -33,7 +33,17 @@
     private long mDeInitTimeNs;
     private long mStartTimeNs;
     private ArrayList<Integer> mFrameSizes;
+    /*
+     * Array for holding the wallclock time
+     * for each input buffer available.
+     */
     private ArrayList<Long> mInputTimer;
+    /*
+     * Array for holding the wallclock time
+     * for each output buffer available.
+     * This is used for determining the decoded
+     * frame intervals.
+     */
     private ArrayList<Long> mOutputTimer;
 
     public Stats() {
@@ -76,9 +86,15 @@
 
     public long getDeInitTime() { return mDeInitTimeNs; }
 
+    public long getStartTime() { return mStartTimeNs; }
+
+    public ArrayList<Long> getOutputTimers() { return mOutputTimer; }
+
+    public ArrayList<Long> getInputTimers() { return mInputTimer; }
+
     public long getTimeDiff(long sTime, long eTime) { return (eTime - sTime); }
 
-    private long getTotalTime() {
+    public long getTotalTime() {
         if (mOutputTimer.size() == 0) {
             return -1;
         }
@@ -86,7 +102,7 @@
         return lastTime - mStartTimeNs;
     }
 
-    private long getTotalSize() {
+    public long getTotalSize() {
         long totalSize = 0;
         for (long size : mFrameSizes) {
             totalSize += size;
diff --git a/media/tests/benchmark/src/native/common/Stats.cpp b/media/tests/benchmark/src/native/common/Stats.cpp
index bfde125..d55a22d 100644
--- a/media/tests/benchmark/src/native/common/Stats.cpp
+++ b/media/tests/benchmark/src/native/common/Stats.cpp
@@ -35,13 +35,18 @@
  * \param mode           the operating mode: sync/async.
  * \param statsFile      the file where the stats data is to be written.
  */
-void Stats::dumpStatistics(string operation, string inputReference, int64_t durationUs,
-                           string componentName, string mode, string statsFile) {
+void Stats::dumpStatistics(const string& operation, const string& inputReference,
+                           int64_t durationUs, const string& componentName,
+                           const string& mode, const string& statsFile) {
     ALOGV("In %s", __func__);
     if (!mOutputTimer.size()) {
         ALOGE("No output produced");
         return;
     }
+    if (statsFile.empty()) {
+        return uploadMetrics(operation, inputReference, durationUs, componentName,
+                              mode);
+    }
     nsecs_t totalTimeTakenNs = getTotalTime();
     nsecs_t timeTakenPerSec = (totalTimeTakenNs * 1000000) / durationUs;
     nsecs_t timeToFirstFrameNs = *mOutputTimer.begin() - mStartTimeNs;
@@ -87,3 +92,67 @@
     out << rowData;
     out.close();
 }
+
+/**
+ * Dumps the stats of the operation for a given input media to a listener.
+ *
+ * \param operation      describes the operation performed on the input media
+ *                       (i.e. extract/mux/decode/encode)
+ * \param inputReference input media
+ * \param durationUs     is a duration of the input media in microseconds.
+ * \param componentName  describes the codecName/muxFormat/mimeType.
+ * \param mode           the operating mode: sync/async.
+ *
+ */
+
+#define LOG_METRIC(...) \
+    __android_log_print(ANDROID_LOG_INFO, "ForTimingCollector", __VA_ARGS__)
+
+void Stats::uploadMetrics(const string& operation, const string& inputReference,
+                          const int64_t& durationUs, const string& componentName,
+                          const string& mode) {
+
+    ALOGV("In %s", __func__);
+    (void)durationUs;
+    (void)componentName;
+    if (!mOutputTimer.size()) {
+        ALOGE("No output produced");
+        return;
+    }
+    nsecs_t totalTimeTakenNs = getTotalTime();
+    nsecs_t timeToFirstFrameNs = *mOutputTimer.begin() - mStartTimeNs;
+    int32_t size = std::accumulate(mFrameSizes.begin(), mFrameSizes.end(), 0);
+    // get min and max output intervals.
+    nsecs_t intervalNs;
+    nsecs_t minTimeTakenNs = INT64_MAX;
+    nsecs_t maxTimeTakenNs = 0;
+    nsecs_t prevIntervalNs = mStartTimeNs;
+    for (int32_t idx = 0; idx < mOutputTimer.size() - 1; idx++) {
+        intervalNs = mOutputTimer.at(idx) - prevIntervalNs;
+        prevIntervalNs = mOutputTimer.at(idx);
+        if (minTimeTakenNs > intervalNs) minTimeTakenNs = intervalNs;
+        else if (maxTimeTakenNs < intervalNs) maxTimeTakenNs = intervalNs;
+    }
+
+    // Log the stats data so the metrics collector can pick it up.
+    int64_t dataSize = size;
+    int64_t bytesPerSec = ((int64_t)dataSize * 1000000000) / totalTimeTakenNs;
+    (void)mode;
+    (void)operation;
+    (void)inputReference;
+    string prefix = "CodecStats_NativeDec";
+    prefix.append("_").append(componentName);
+    // Reports the time taken to initialize the codec.
+    LOG_METRIC("%s_CodecInitTimeNs:%lld", prefix.c_str(), (long long)mInitTimeNs);
+    // Reports the time taken to free the codec.
+    LOG_METRIC("%s_CodecDeInitTimeNs:%lld", prefix.c_str(), (long long)mDeInitTimeNs);
+    // Reports the min time taken between output frames from the codec
+    LOG_METRIC("%s_CodecMinTimeNs:%lld", prefix.c_str(), (long long)minTimeTakenNs);
+    // Reports the max time between the output frames from the codec
+    LOG_METRIC("%s_CodecMaxTimeNs:%lld", prefix.c_str(), (long long)maxTimeTakenNs);
+    // Reports the raw throughput (bytes/sec) of the codec for the entire media
+    LOG_METRIC("%s_ProcessedBytesPerSec:%lld", prefix.c_str(), (long long)bytesPerSec);
+    // Reports the time taken to get the first frame from the codec
+    LOG_METRIC("%s_TimeforFirstFrame:%lld", prefix.c_str(), (long long)timeToFirstFrameNs);
+
+}
diff --git a/media/tests/benchmark/src/native/common/Stats.h b/media/tests/benchmark/src/native/common/Stats.h
index 18e4b06..0ba511f 100644
--- a/media/tests/benchmark/src/native/common/Stats.h
+++ b/media/tests/benchmark/src/native/common/Stats.h
@@ -102,8 +102,12 @@
         return (*(mOutputTimer.end() - 1) - mStartTimeNs);
     }
 
-    void dumpStatistics(string operation, string inputReference, int64_t duarationUs,
-                        string codecName = "", string mode = "", string statsFile = "");
-};
+    void dumpStatistics(const string& operation, const string& inputReference,
+                        int64_t durationUs, const string& componentName = "",
+                        const string& mode = "", const string& statsFile = "");
 
+    void uploadMetrics(const string& operation, const string& inputReference,
+                      const int64_t& durationUs, const string& componentName = "",
+                      const string& mode = "");
+};
 #endif  // __STATS_H__
diff --git a/media/utils/MemoryLeakTrackUtil.cpp b/media/utils/MemoryLeakTrackUtil.cpp
index fdb8c4f..7451033 100644
--- a/media/utils/MemoryLeakTrackUtil.cpp
+++ b/media/utils/MemoryLeakTrackUtil.cpp
@@ -33,10 +33,8 @@
 #define ABI_STRING "arm"
 #elif defined(__aarch64__)
 #define ABI_STRING "arm64"
-#elif defined(__mips__) && !defined(__LP64__)
-#define ABI_STRING "mips"
-#elif defined(__mips__) && defined(__LP64__)
-#define ABI_STRING "mips64"
+#elif defined(__riscv)
+#define ABI_STRING "riscv64"
 #elif defined(__i386__)
 #define ABI_STRING "x86"
 #elif defined(__x86_64__)
diff --git a/media/utils/include/mediautils/SharedMemoryAllocator.h b/media/utils/include/mediautils/SharedMemoryAllocator.h
index cf3a662..17c1ac9 100644
--- a/media/utils/include/mediautils/SharedMemoryAllocator.h
+++ b/media/utils/include/mediautils/SharedMemoryAllocator.h
@@ -325,8 +325,8 @@
                 continue;
             }
             const auto heap = handle->getMemory();
-            dump << std::setw(8) << heap->getHeapID() << std::setw(8) << heap->getOffset()
-                 << std::setw(8) << heap->getSize() << std::setw(8) << value.allocation_number
+            dump << std::setw(8) << heap->getHeapID() << std::setw(8) << heap->getSize()
+                 << std::setw(8) << heap->getOffset() << std::setw(8) << value.allocation_number
                  << "   " << value.name << "\n";
         }
         return dump.str();
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 87c67bd..27ac212 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -43,7 +43,9 @@
         "FastThread.cpp",
         "FastThreadDumpState.cpp",
         "FastThreadState.cpp",
+        "MelReporter.cpp",
         "NBAIO_Tee.cpp",
+        "PatchCommandThread.cpp",
         "PatchPanel.cpp",
         "PropertyUtils.cpp",
         "SpdifStreamOut.cpp",
@@ -99,6 +101,7 @@
         "libaaudio_headers",
         "libaudioclient_headers",
         "libaudiohal_headers",
+        "libaudioutils_headers",
         "libmedia_headers",
     ],
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 3e9400b..d5100fe 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -284,7 +284,6 @@
                 return opPackageLegacy == package; }) == packages.end()) {
             ALOGW("The package name(%s) provided does not correspond to the uid %d",
                     attributionSource.packageName.value_or("").c_str(), attributionSource.uid);
-            checkedAttributionSource.packageName = std::optional<std::string>();
         }
     }
     return checkedAttributionSource;
@@ -323,7 +322,9 @@
       mClientSharedHeapSize(kMinimumClientSharedHeapSizeBytes),
       mGlobalEffectEnableTime(0),
       mPatchPanel(this),
-      mDeviceEffectManager(this),
+      mPatchCommandThread(sp<PatchCommandThread>::make()),
+      mDeviceEffectManager(sp<DeviceEffectManager>::make(*this)),
+      mMelReporter(sp<MelReporter>::make(*this)),
       mSystemReady(false)
 {
     // Move the audio session unique ID generator start base as time passes to limit risk of
@@ -583,6 +584,33 @@
     audio_io_handle_t io = AUDIO_IO_HANDLE_NONE;
     audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
     audio_attributes_t localAttr = *attr;
+
+    // TODO b/182392553: refactor or make clearer
+    pid_t clientPid =
+        VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(client.attributionSource.pid));
+    bool updatePid = (clientPid == (pid_t)-1);
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+
+    AttributionSourceState adjAttributionSource = client.attributionSource;
+    if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
+        uid_t clientUid =
+            VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(client.attributionSource.uid));
+        ALOGW_IF(clientUid != callingUid,
+                "%s uid %d tried to pass itself off as %d",
+                __FUNCTION__, callingUid, clientUid);
+        adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+        updatePid = true;
+    }
+    if (updatePid) {
+        const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW_IF(clientPid != (pid_t)-1 && clientPid != callingPid,
+                 "%s uid %d pid %d tried to pass itself off as pid %d",
+                 __func__, callingUid, callingPid, clientPid);
+        adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
+    }
+    adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+            adjAttributionSource);
+
     if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
         audio_config_t fullConfig = AUDIO_CONFIG_INITIALIZER;
         fullConfig.sample_rate = config->sample_rate;
@@ -592,7 +620,7 @@
         bool isSpatialized;
         ret = AudioSystem::getOutputForAttr(&localAttr, &io,
                                             actualSessionId,
-                                            &streamType, client.attributionSource,
+                                            &streamType, adjAttributionSource,
                                             &fullConfig,
                                             (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
                                                     AUDIO_OUTPUT_FLAG_DIRECT),
@@ -608,7 +636,7 @@
         ret = AudioSystem::getInputForAttr(&localAttr, &io,
                                               RECORD_RIID_INVALID,
                                               actualSessionId,
-                                              client.attributionSource,
+                                              adjAttributionSource,
                                               config,
                                               AUDIO_INPUT_FLAG_MMAP_NOIRQ, deviceId, &portId);
     }
@@ -642,21 +670,22 @@
 }
 
 /* static */
-int AudioFlinger::onExternalVibrationStart(const sp<os::ExternalVibration>& externalVibration) {
+os::HapticScale AudioFlinger::onExternalVibrationStart(
+        const sp<os::ExternalVibration>& externalVibration) {
     sp<os::IExternalVibratorService> evs = getExternalVibratorService();
     if (evs != nullptr) {
         int32_t ret;
         binder::Status status = evs->onExternalVibrationStart(*externalVibration, &ret);
         if (status.isOk()) {
             ALOGD("%s, start external vibration with intensity as %d", __func__, ret);
-            return ret;
+            return os::ExternalVibration::externalVibrationScaleToHapticScale(ret);
         }
     }
     ALOGD("%s, start external vibration with intensity as MUTE due to %s",
             __func__,
             evs == nullptr ? "external vibration service not found"
                            : "error when querying intensity");
-    return static_cast<int>(os::HapticScale::MUTE);
+    return os::HapticScale::MUTE;
 }
 
 /* static */
@@ -731,12 +760,11 @@
 {
     String8 result;
 
-    result.append("Clients:\n");
-    result.append("   pid    heap_size\n");
+    result.append("Client Allocators:\n");
     for (size_t i = 0; i < mClients.size(); ++i) {
         sp<Client> client = mClients.valueAt(i).promote();
         if (client != 0) {
-          result.append("Client: %d\n", client->pid());
+          result.appendFormat("Client: %d\n", client->pid());
           result.append(client->allocator().dump().c_str());
         }
    }
@@ -873,7 +901,10 @@
 
         mPatchPanel.dump(fd);
 
-        mDeviceEffectManager.dump(fd);
+        mDeviceEffectManager->dump(fd);
+
+        std::string melOutput = mMelReporter->dump();
+        write(fd, melOutput.c_str(), melOutput.size());
 
         // dump external setParameters
         auto dumpLogger = [fd](SimpleLog& logger, const char* name) {
@@ -1063,7 +1094,7 @@
     audio_attributes_t localAttr = input.attr;
 
     AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
-    if (!isAudioServerOrMediaServerUid(callingUid)) {
+    if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
         ALOGW_IF(clientUid != callingUid,
                 "%s uid %d tried to pass itself off as %d",
                 __FUNCTION__, callingUid, clientUid);
@@ -1079,6 +1110,8 @@
         clientPid = callingPid;
         adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
     }
+    adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+            adjAttributionSource);
 
     audio_session_t sessionId = input.sessionId;
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
@@ -2281,7 +2314,7 @@
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
     const uid_t currentUid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(
            adjAttributionSource.uid));
-    if (!isAudioServerOrMediaServerUid(callingUid)) {
+    if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
         ALOGW_IF(currentUid != callingUid,
                 "%s uid %d tried to pass itself off as %d",
                 __FUNCTION__, callingUid, currentUid);
@@ -2297,7 +2330,8 @@
                  __func__, callingUid, callingPid, currentPid);
         adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
     }
-
+    adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+            adjAttributionSource);
     // we don't yet support anything other than linear PCM
     if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
         ALOGE("createRecord() invalid format %#x", input.config.format);
@@ -2867,13 +2901,15 @@
                 ALOGV("openOutput_l() created spatializer output: ID %d thread %p",
                       *output, thread.get());
             } else if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-                thread = new OffloadThread(this, outputStream, *output, mSystemReady);
+                thread = new OffloadThread(this, outputStream, *output,
+                        mSystemReady, halConfig->offload_info);
                 ALOGV("openOutput_l() created offload output: ID %d thread %p",
                       *output, thread.get());
             } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                     || !isValidPcmSinkFormat(halConfig->format)
                     || !isValidPcmSinkChannelMask(halConfig->channel_mask)) {
-                thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
+                thread = new DirectOutputThread(this, outputStream, *output,
+                        mSystemReady, halConfig->offload_info);
                 ALOGV("openOutput_l() created direct output: ID %d thread %p",
                       *output, thread.get());
             } else {
@@ -3939,7 +3975,7 @@
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
     adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
     pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(adjAttributionSource.pid));
-    if (currentPid == -1 || !isAudioServerOrMediaServerUid(callingUid)) {
+    if (currentPid == -1 || !isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
         const pid_t callingPid = IPCThreadState::self()->getCallingPid();
         ALOGW_IF(currentPid != -1 && currentPid != callingPid,
                  "%s uid %d pid %d tried to pass itself off as pid %d",
@@ -3947,6 +3983,7 @@
         adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
         currentPid = callingPid;
     }
+    adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(adjAttributionSource);
 
     ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d, factory %p",
           adjAttributionSource.pid, effectClient.get(), priority, sessionId, io,
@@ -4067,7 +4104,7 @@
         if (sessionId == AUDIO_SESSION_DEVICE) {
             sp<Client> client = registerPid(currentPid);
             ALOGV("%s device type %#x address %s", __func__, device.mType, device.getAddress());
-            handle = mDeviceEffectManager.createEffect_l(
+            handle = mDeviceEffectManager->createEffect_l(
                     &descOut, device, client, effectClient, mPatchPanel.patches_l(),
                     &enabledOut, &lStatus, probe, request.notifyFramesProcessed);
             if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 7c5afce..6a94164 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -82,6 +82,8 @@
 #include <audio_utils/clock.h>
 #include <audio_utils/FdToString.h>
 #include <audio_utils/LinearMap.h>
+#include <audio_utils/MelAggregator.h>
+#include <audio_utils/MelProcessor.h>
 #include <audio_utils/SimpleLog.h>
 #include <audio_utils/TimestampVerifier.h>
 
@@ -320,7 +322,8 @@
                             sp<MmapStreamInterface>& interface,
                             audio_port_handle_t *handle);
 
-    static int onExternalVibrationStart(const sp<os::ExternalVibration>& externalVibration);
+    static os::HapticScale onExternalVibrationStart(
+        const sp<os::ExternalVibration>& externalVibration);
     static void onExternalVibrationStop(const sp<os::ExternalVibration>& externalVibration);
 
     status_t addEffectToHal(audio_port_handle_t deviceId,
@@ -631,10 +634,14 @@
 
 #include "PatchPanel.h"
 
+#include "PatchCommandThread.h"
+
 #include "Effects.h"
 
 #include "DeviceEffectManager.h"
 
+#include "MelReporter.h"
+
     // Find io handle by session id.
     // Preference is given to an io handle with a matching effect chain to session id.
     // If none found, AUDIO_IO_HANDLE_NONE is returned.
@@ -1011,7 +1018,9 @@
     PatchPanel mPatchPanel;
     sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
 
-    DeviceEffectManager mDeviceEffectManager;
+    const sp<PatchCommandThread> mPatchCommandThread;
+    sp<DeviceEffectManager> mDeviceEffectManager;
+    sp<MelReporter> mMelReporter;
 
     bool       mSystemReady;
     std::atomic_bool mAudioPolicyReady{};
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index 4f3ed0a..a614736 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -32,16 +32,6 @@
 
 using media::IEffectClient;
 
-void AudioFlinger::DeviceEffectManager::createAudioPatch(audio_patch_handle_t handle,
-        const PatchPanel::Patch& patch) {
-    ALOGV("%s handle %d mHalHandle %d num sinks %d device sink %08x",
-            __func__, handle, patch.mHalHandle,
-            patch.mAudioPatch.num_sinks,
-            patch.mAudioPatch.num_sinks > 0 ? patch.mAudioPatch.sinks[0].ext.device.type : 0);
-
-    mCommandThread->createAudioPatchCommand(handle, patch);
-}
-
 void AudioFlinger::DeviceEffectManager::onCreateAudioPatch(audio_patch_handle_t handle,
         const PatchPanel::Patch& patch) {
     ALOGV("%s handle %d mHalHandle %d device sink %08x",
@@ -55,11 +45,6 @@
     }
 }
 
-void AudioFlinger::DeviceEffectManager::releaseAudioPatch(audio_patch_handle_t handle) {
-    ALOGV("%s", __func__);
-    mCommandThread->releaseAudioPatchCommand(handle);
-}
-
 void AudioFlinger::DeviceEffectManager::onReleaseAudioPatch(audio_patch_handle_t handle) {
     ALOGV("%s", __func__);
     Mutex::Autolock _l(mLock);
@@ -116,7 +101,7 @@
             }
         }
     }
-    if (enabled != NULL) {
+    if (enabled != nullptr) {
         *enabled = (int)effect->isEnabled();
     }
     *status = lStatus;
@@ -208,91 +193,4 @@
     return true;
 }
 
-// -----------  DeviceEffectManager::CommandThread implementation ----------
-
-
-AudioFlinger::DeviceEffectManager::CommandThread::~CommandThread()
-{
-    Mutex::Autolock _l(mLock);
-    mCommands.clear();
-}
-
-void AudioFlinger::DeviceEffectManager::CommandThread::onFirstRef()
-{
-    run("DeviceEffectManage_CommandThread", ANDROID_PRIORITY_AUDIO);
-}
-
-bool AudioFlinger::DeviceEffectManager::CommandThread::threadLoop()
-{
-    mLock.lock();
-    while (!exitPending())
-    {
-        while (!mCommands.empty() && !exitPending()) {
-            sp<Command> command = mCommands.front();
-            mCommands.pop_front();
-            mLock.unlock();
-
-            switch (command->mCommand) {
-            case CREATE_AUDIO_PATCH: {
-                CreateAudioPatchData *data = (CreateAudioPatchData *)command->mData.get();
-                ALOGV("CommandThread() processing create audio patch handle %d", data->mHandle);
-                mManager.onCreateAudioPatch(data->mHandle, data->mPatch);
-                } break;
-            case RELEASE_AUDIO_PATCH: {
-                ReleaseAudioPatchData *data = (ReleaseAudioPatchData *)command->mData.get();
-                ALOGV("CommandThread() processing release audio patch handle %d", data->mHandle);
-                mManager.onReleaseAudioPatch(data->mHandle);
-                } break;
-            default:
-                ALOGW("CommandThread() unknown command %d", command->mCommand);
-            }
-            mLock.lock();
-        }
-
-        // At this stage we have either an empty command queue or the first command in the queue
-        // has a finite delay. So unless we are exiting it is safe to wait.
-        if (!exitPending()) {
-            ALOGV("CommandThread() going to sleep");
-            mWaitWorkCV.wait(mLock);
-        }
-    }
-    mLock.unlock();
-    return false;
-}
-
-void AudioFlinger::DeviceEffectManager::CommandThread::sendCommand(sp<Command> command) {
-    Mutex::Autolock _l(mLock);
-    mCommands.push_back(command);
-    mWaitWorkCV.signal();
-}
-
-void AudioFlinger::DeviceEffectManager::CommandThread::createAudioPatchCommand(
-        audio_patch_handle_t handle, const PatchPanel::Patch& patch)
-{
-    sp<Command> command = new Command(CREATE_AUDIO_PATCH, new CreateAudioPatchData(handle, patch));
-    ALOGV("CommandThread() adding create patch handle %d mHalHandle %d.", handle, patch.mHalHandle);
-    sendCommand(command);
-}
-
-void AudioFlinger::DeviceEffectManager::CommandThread::releaseAudioPatchCommand(
-        audio_patch_handle_t handle)
-{
-    sp<Command> command = new Command(RELEASE_AUDIO_PATCH, new ReleaseAudioPatchData(handle));
-    ALOGV("CommandThread() adding release patch");
-    sendCommand(command);
-}
-
-void AudioFlinger::DeviceEffectManager::CommandThread::exit()
-{
-    ALOGV("CommandThread::exit");
-    {
-        AutoMutex _l(mLock);
-        requestExit();
-        mWaitWorkCV.signal();
-    }
-    // Note that we can call it from the thread loop if all other references have been released
-    // but it will safely return WOULD_BLOCK in this case
-    requestExitAndWait();
-}
-
 } // namespace android
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index d2faa70..7602f12 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -20,15 +20,15 @@
 #endif
 
 // DeviceEffectManager is concealed within AudioFlinger, their lifetimes are the same.
-class DeviceEffectManager {
+class DeviceEffectManager : public PatchCommandThread::PatchCommandListener {
 public:
-    explicit DeviceEffectManager(AudioFlinger* audioFlinger)
-        : mCommandThread(new CommandThread(*this)), mAudioFlinger(*audioFlinger),
-        mMyCallback(new DeviceEffectManagerCallback(this)) {}
+    explicit DeviceEffectManager(AudioFlinger& audioFlinger)
+        : mAudioFlinger(audioFlinger),
+          mMyCallback(new DeviceEffectManagerCallback(*this)) {}
 
-            ~DeviceEffectManager() {
-                mCommandThread->exit();
-            }
+    void onFirstRef() override {
+        mAudioFlinger.mPatchCommandThread->addListener(this);
+    }
 
     sp<EffectHandle> createEffect_l(effect_descriptor_t *descriptor,
                 const AudioDeviceTypeAddr& device,
@@ -39,8 +39,6 @@
                 status_t *status,
                 bool probe,
                 bool notifyFramesProcessed);
-    void createAudioPatch(audio_patch_handle_t handle, const PatchPanel::Patch& patch);
-    void releaseAudioPatch(audio_patch_handle_t handle);
 
     size_t removeEffect(const sp<DeviceEffectProxy>& effect);
     status_t createEffectHal(const effect_uuid_t *pEffectUuid,
@@ -59,94 +57,25 @@
 
     void dump(int fd);
 
+    // PatchCommandThread::PatchCommandListener implementation
+
+    void onCreateAudioPatch(audio_patch_handle_t handle,
+                            const PatchPanel::Patch& patch) override;
+    void onReleaseAudioPatch(audio_patch_handle_t handle) override;
+
 private:
-
-    // Thread to execute create and release patch commands asynchronously. This is needed because
-    // PatchPanel::createAudioPatch and releaseAudioPatch are executed from audio policy service
-    // with mutex locked and effect management requires to call back into audio policy service
-    class Command;
-    class CommandThread : public Thread {
-    public:
-
-        enum {
-            CREATE_AUDIO_PATCH,
-            RELEASE_AUDIO_PATCH,
-        };
-
-        CommandThread(DeviceEffectManager& manager)
-            : Thread(false), mManager(manager) {}
-        ~CommandThread() override;
-
-        // Thread virtuals
-        void onFirstRef() override;
-        bool threadLoop() override;
-
-                void exit();
-
-                void createAudioPatchCommand(audio_patch_handle_t handle,
-                        const PatchPanel::Patch& patch);
-                void releaseAudioPatchCommand(audio_patch_handle_t handle);
-
-    private:
-        class CommandData;
-
-        // descriptor for requested tone playback event
-        class Command: public RefBase {
-        public:
-            Command() = default;
-            Command(int command, sp<CommandData> data)
-                : mCommand(command), mData(data) {}
-
-            int mCommand = -1;
-            sp<CommandData> mData;
-        };
-
-        class CommandData: public RefBase {
-        public:
-            virtual ~CommandData() = default;
-        };
-
-        class CreateAudioPatchData : public CommandData {
-        public:
-            CreateAudioPatchData(audio_patch_handle_t handle, const PatchPanel::Patch& patch)
-                :   mHandle(handle), mPatch(patch) {}
-
-            audio_patch_handle_t mHandle;
-            const PatchPanel::Patch mPatch;
-        };
-
-        class ReleaseAudioPatchData : public CommandData {
-        public:
-            ReleaseAudioPatchData(audio_patch_handle_t handle)
-                :   mHandle(handle) {}
-
-            audio_patch_handle_t mHandle;
-        };
-
-        void sendCommand(sp<Command> command);
-
-        Mutex   mLock;
-        Condition mWaitWorkCV;
-        std::deque <sp<Command>> mCommands; // list of pending commands
-        DeviceEffectManager& mManager;
-    };
-
-    void onCreateAudioPatch(audio_patch_handle_t handle, const PatchPanel::Patch& patch);
-    void onReleaseAudioPatch(audio_patch_handle_t handle);
-
     status_t checkEffectCompatibility(const effect_descriptor_t *desc);
 
     Mutex mLock;
-    sp<CommandThread> mCommandThread;
     AudioFlinger &mAudioFlinger;
     const sp<DeviceEffectManagerCallback> mMyCallback;
     std::map<AudioDeviceTypeAddr, sp<DeviceEffectProxy>> mDeviceEffects;
 };
 
-class DeviceEffectManagerCallback :  public EffectCallbackInterface {
+class DeviceEffectManagerCallback : public EffectCallbackInterface {
 public:
-            DeviceEffectManagerCallback(DeviceEffectManager *manager)
-                : mManager(*manager) {}
+    DeviceEffectManagerCallback(DeviceEffectManager& manager)
+        : mManager(manager) {}
 
     status_t createEffectHal(const effect_uuid_t *pEffectUuid,
            int32_t sessionId, int32_t deviceId,
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 7b4ae24..ca1bba6 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1597,7 +1597,7 @@
     return isHapticGenerator(&mDescriptor.type);
 }
 
-status_t AudioFlinger::EffectModule::setHapticIntensity(int id, int intensity)
+status_t AudioFlinger::EffectModule::setHapticIntensity(int id, os::HapticScale intensity)
 {
     if (mStatus != NO_ERROR) {
         return mStatus;
@@ -1613,7 +1613,7 @@
     param->vsize = sizeof(int32_t) * 2;
     *(int32_t*)param->data = HG_PARAM_HAPTIC_INTENSITY;
     *((int32_t*)param->data + 1) = id;
-    *((int32_t*)param->data + 2) = intensity;
+    *((int32_t*)param->data + 2) = static_cast<int32_t>(intensity);
     std::vector<uint8_t> response;
     status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
     if (status == NO_ERROR) {
@@ -2676,7 +2676,7 @@
     return false;
 }
 
-void AudioFlinger::EffectChain::setHapticIntensity_l(int id, int intensity)
+void AudioFlinger::EffectChain::setHapticIntensity_l(int id, os::HapticScale intensity)
 {
     Mutex::Autolock _l(mLock);
     for (size_t i = 0; i < mEffects.size(); ++i) {
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 78788df..72ec0e5 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -280,7 +280,7 @@
     static bool      isHapticGenerator(const effect_uuid_t* type);
     bool             isHapticGenerator() const;
 
-    status_t         setHapticIntensity(int id, int intensity);
+    status_t         setHapticIntensity(int id, os::HapticScale intensity);
     status_t         setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo);
 
     status_t         getConfigs(audio_config_base_t* inputCfg,
@@ -550,7 +550,7 @@
 
     bool containsHapticGeneratingEffect_l();
 
-    void setHapticIntensity_l(int id, int intensity);
+    void setHapticIntensity_l(int id, os::HapticScale intensity);
 
     sp<EffectCallbackInterface> effectCallback() const { return mEffectCallback; }
     wp<ThreadBase> thread() const { return mEffectCallback->thread(); }
diff --git a/services/audioflinger/MelReporter.cpp b/services/audioflinger/MelReporter.cpp
new file mode 100644
index 0000000..9a579dd
--- /dev/null
+++ b/services/audioflinger/MelReporter.cpp
@@ -0,0 +1,127 @@
+/*
+**
+** Copyright 2022, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "AudioFlinger::MelReporter"
+
+#include <cinttypes>
+#include <utils/Log.h>
+#include <android-base/stringprintf.h>
+#include <audio_utils/power.h>
+
+#include "AudioFlinger.h"
+
+namespace android {
+
+bool AudioFlinger::MelReporter::shouldComputeMelForDeviceType(audio_devices_t device) {
+    switch (device) {
+        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+        case AUDIO_DEVICE_OUT_HEARING_AID:
+        case AUDIO_DEVICE_OUT_USB_HEADSET:
+        case AUDIO_DEVICE_OUT_BLE_HEADSET:
+        case AUDIO_DEVICE_OUT_BLE_BROADCAST:
+            return true;
+        default:
+            return false;
+    }
+}
+
+void AudioFlinger::MelReporter::onCreateAudioPatch(audio_patch_handle_t handle,
+        const PatchPanel::Patch& patch) {
+    ALOGV("%s: handle %d mHalHandle %d device sink %08x",
+            __func__, handle, patch.mHalHandle,
+            patch.mAudioPatch.num_sinks > 0 ? patch.mAudioPatch.sinks[0].ext.device.type : 0);
+    if (patch.mAudioPatch.num_sources == 0
+        || patch.mAudioPatch.sources[0].type != AUDIO_PORT_TYPE_MIX) {
+        ALOGW("%s: patch does not contain any mix sources", __func__);
+        return;
+    }
+
+    audio_io_handle_t streamHandle = patch.mAudioPatch.sources[0].ext.mix.handle;
+    ActiveMelPatch newPatch;
+    newPatch.streamHandle = streamHandle;
+    for (int i = 0; i < patch.mAudioPatch.num_sinks; ++i) {
+        if (patch.mAudioPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE
+            && shouldComputeMelForDeviceType(patch.mAudioPatch.sinks[i].ext.device.type)) {
+            audio_port_handle_t deviceId = patch.mAudioPatch.sinks[i].id;
+            newPatch.deviceHandles.push_back(deviceId);
+
+            // Start the MEL calculation in the PlaybackThread
+            std::lock_guard _lAf(mAudioFlinger.mLock);
+            auto thread = mAudioFlinger.checkPlaybackThread_l(streamHandle);
+            if (thread != nullptr) {
+                thread->startMelComputation(mMelAggregator.getOrCreateCallbackForDevice(
+                    deviceId,
+                    newPatch.streamHandle));
+            }
+        }
+    }
+
+    std::lock_guard _l(mLock);
+    mActiveMelPatches[patch.mAudioPatch.id] = newPatch;
+}
+
+void AudioFlinger::MelReporter::onReleaseAudioPatch(audio_patch_handle_t handle) {
+    ALOGV("%s", __func__);
+
+    ActiveMelPatch melPatch;
+    {
+        std::lock_guard _l(mLock);
+
+        auto patchIt = mActiveMelPatches.find(handle);
+        if (patchIt == mActiveMelPatches.end()) {
+            ALOGW(
+                "%s patch does not contain any mix sources with active MEL calculation",
+                __func__);
+            return;
+        }
+
+        melPatch = patchIt->second;
+        mActiveMelPatches.erase(patchIt);
+    }
+
+    // Stop MEL calculation for the PlaybackThread
+    std::lock_guard _lAf(mAudioFlinger.mLock);
+    auto thread = mAudioFlinger.checkPlaybackThread_l(melPatch.streamHandle);
+    if (thread != nullptr) {
+        thread->stopMelComputation();
+    }
+    mMelAggregator.removeStreamCallback(melPatch.streamHandle);
+}
+
+std::string AudioFlinger::MelReporter::dump() {
+    std::lock_guard _l(mLock);
+    std::string output;
+
+    base::StringAppendF(&output, "\nMel Reporter:\n");
+    mMelAggregator.foreach([&output](const audio_utils::MelRecord& melRecord) {
+        base::StringAppendF(&output, "Continuous MELs for portId=%d, ", melRecord.portId);
+        base::StringAppendF(&output, "starting at timestamp %" PRId64 ": ", melRecord.timestamp);
+
+        for (const auto& mel : melRecord.mels) {
+            base::StringAppendF(&output, "%d ", mel);
+        }
+        base::StringAppendF(&output, "\n");
+    });
+
+    return output;
+}
+
+}  // namespace android
diff --git a/services/audioflinger/MelReporter.h b/services/audioflinger/MelReporter.h
new file mode 100644
index 0000000..905a4cd
--- /dev/null
+++ b/services/audioflinger/MelReporter.h
@@ -0,0 +1,71 @@
+/*
+**
+** Copyright 2022, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+    #error This header file should only be included from AudioFlinger.h
+#endif
+
+#include <unordered_map>
+#include <mutex>
+
+constexpr static int kMaxTimestampDeltaInSec = 120;
+
+/**
+ * Class for listening to new patches and starting the MEL computation. MelReporter is
+ * concealed within AudioFlinger; their lifetimes are the same.
+ */
+class MelReporter : public PatchCommandThread::PatchCommandListener {
+public:
+    explicit MelReporter(AudioFlinger& audioFlinger)
+        : mAudioFlinger(audioFlinger),
+          mMelAggregator(kMaxTimestampDeltaInSec) {}
+
+    void onFirstRef() override {
+        mAudioFlinger.mPatchCommandThread->addListener(this);
+    }
+
+    /** Returns true if we should compute MEL for the given device. */
+    static bool shouldComputeMelForDeviceType(audio_devices_t device);
+
+    // For now only support internal MelReporting
+    [[nodiscard]] bool isHalReportingEnabled() const { return false; }
+
+    std::string dump();
+
+    // PatchCommandListener methods
+    void onCreateAudioPatch(audio_patch_handle_t handle,
+                            const PatchPanel::Patch& patch) override;
+    void onReleaseAudioPatch(audio_patch_handle_t handle) override;
+
+private:
+    AudioFlinger& mAudioFlinger;  // does not own the object
+
+    audio_utils::MelAggregator mMelAggregator;
+
+    struct ActiveMelPatch {
+        audio_io_handle_t streamHandle{AUDIO_IO_HANDLE_NONE};
+        std::vector<audio_port_handle_t> deviceHandles;
+    };
+
+    /**
+     * Lock for protecting the active mel patches. Do not mix with the AudioFlinger lock.
+     * Locking order AudioFlinger::mLock -> PatchCommandThread::mLock -> MelReporter::mLock.
+     */
+    std::mutex mLock;
+    std::unordered_map<audio_patch_handle_t, ActiveMelPatch>
+        mActiveMelPatches GUARDED_BY(AudioFlinger::MelReporter::mLock);
+};
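A minimal sketch of the locking order documented in the comment above, following what MelReporter::onCreateAudioPatch does in the accompanying .cpp change: the AudioFlinger lock is taken and released to resolve the playback thread before the MelReporter lock is taken to record the patch, so the two locks are never acquired in the reverse order. The helper name is hypothetical and the body is illustrative only.

    // Illustration only: respects the documented order AudioFlinger::mLock -> MelReporter::mLock.
    void MelReporter::trackPatchSketch(audio_patch_handle_t handle, ActiveMelPatch newPatch) {
        {
            // 1. AudioFlinger lock first: resolve the playback thread for the patch.
            std::lock_guard _lAf(mAudioFlinger.mLock);
            if (auto thread = mAudioFlinger.checkPlaybackThread_l(newPatch.streamHandle)) {
                // ... start MEL computation on the thread, as onCreateAudioPatch does ...
            }
        }  // AudioFlinger lock released before the MelReporter lock is taken.
        // 2. MelReporter lock on its own: record the active patch.
        std::lock_guard _l(mLock);
        mActiveMelPatches[handle] = std::move(newPatch);
    }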
diff --git a/services/audioflinger/PatchCommandThread.cpp b/services/audioflinger/PatchCommandThread.cpp
new file mode 100644
index 0000000..c3cb7e7
--- /dev/null
+++ b/services/audioflinger/PatchCommandThread.cpp
@@ -0,0 +1,156 @@
+/*
+**
+** Copyright 2022, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioFlinger::PatchCommandThread"
+//#define LOG_NDEBUG 0
+
+#include "AudioFlinger.h"
+
+namespace android {
+
+constexpr char kPatchCommandThreadName[] = "AudioFlinger_PatchCommandThread";
+
+AudioFlinger::PatchCommandThread::~PatchCommandThread() {
+    exit();
+
+    std::lock_guard _l(mLock);
+    mCommands.clear();
+}
+
+void AudioFlinger::PatchCommandThread::onFirstRef() {
+    run(kPatchCommandThreadName, ANDROID_PRIORITY_AUDIO);
+}
+
+void AudioFlinger::PatchCommandThread::addListener(const sp<PatchCommandListener>& listener) {
+    ALOGV("%s add listener %p", __func__, static_cast<void*>(listener.get()));
+    std::lock_guard _l(mListenerLock);
+    mListeners.emplace_back(listener);
+}
+
+void AudioFlinger::PatchCommandThread::createAudioPatch(audio_patch_handle_t handle,
+        const PatchPanel::Patch& patch) {
+    ALOGV("%s handle %d mHalHandle %d num sinks %d device sink %08x",
+            __func__, handle, patch.mHalHandle,
+            patch.mAudioPatch.num_sinks,
+            patch.mAudioPatch.num_sinks > 0 ? patch.mAudioPatch.sinks[0].ext.device.type : 0);
+
+    createAudioPatchCommand(handle, patch);
+}
+
+void AudioFlinger::PatchCommandThread::releaseAudioPatch(audio_patch_handle_t handle) {
+    ALOGV("%s", __func__);
+    releaseAudioPatchCommand(handle);
+}
+
+bool AudioFlinger::PatchCommandThread::threadLoop() {
+    std::unique_lock _l(mLock);
+
+    while (!exitPending()) {
+        while (!mCommands.empty() && !exitPending()) {
+            const sp<Command> command = mCommands.front();
+            mCommands.pop_front();
+            _l.unlock();
+
+            std::vector<wp<PatchCommandListener>> listenersCopy;
+            {
+                std::lock_guard _ll(mListenerLock);
+                listenersCopy = mListeners;
+            }
+
+            switch (command->mCommand) {
+                case CREATE_AUDIO_PATCH: {
+                    const auto data = (CreateAudioPatchData*) command->mData.get();
+                    ALOGV("%s processing create audio patch handle %d",
+                          __func__,
+                          data->mHandle);
+
+                    for (const auto& listener : listenersCopy) {
+                        auto spListener = listener.promote();
+                        if (spListener) {
+                            spListener->onCreateAudioPatch(data->mHandle, data->mPatch);
+                        }
+                    }
+                }
+                    break;
+                case RELEASE_AUDIO_PATCH: {
+                    const auto data = (ReleaseAudioPatchData*) command->mData.get();
+                    ALOGV("%s processing release audio patch handle %d",
+                          __func__,
+                          data->mHandle);
+
+                    for (const auto& listener : listenersCopy) {
+                        auto spListener = listener.promote();
+                        if (spListener) {
+                            spListener->onReleaseAudioPatch(data->mHandle);
+                        }
+                    }
+                }
+                    break;
+                default:
+                    ALOGW("%s unknown command %d", __func__, command->mCommand);
+                    break;
+            }
+            _l.lock();
+        }
+
+        // At this stage we have either an empty command queue or the first command in the queue
+        // has a finite delay. So unless we are exiting it is safe to wait.
+        if (!exitPending()) {
+            ALOGV("%s going to sleep", __func__);
+            mWaitWorkCV.wait(_l);
+        }
+    }
+    return false;
+}
+
+void AudioFlinger::PatchCommandThread::sendCommand(const sp<Command>& command) {
+    std::lock_guard _l(mLock);
+    mCommands.emplace_back(command);
+    mWaitWorkCV.notify_one();
+}
+
+void AudioFlinger::PatchCommandThread::createAudioPatchCommand(
+        audio_patch_handle_t handle, const PatchPanel::Patch& patch) {
+    auto command = sp<Command>::make(CREATE_AUDIO_PATCH,
+                                     new CreateAudioPatchData(handle, patch));
+    ALOGV("%s adding create patch handle %d mHalHandle %d.",
+          __func__,
+          handle,
+          patch.mHalHandle);
+    sendCommand(command);
+}
+
+void AudioFlinger::PatchCommandThread::releaseAudioPatchCommand(audio_patch_handle_t handle) {
+    sp<Command> command =
+        sp<Command>::make(RELEASE_AUDIO_PATCH, new ReleaseAudioPatchData(handle));
+    ALOGV("%s adding release patch", __func__);
+    sendCommand(command);
+}
+
+void AudioFlinger::PatchCommandThread::exit() {
+    ALOGV("%s", __func__);
+    {
+        std::lock_guard _l(mLock);
+        requestExit();
+        mWaitWorkCV.notify_one();
+    }
+    // Note that this can be called from the thread loop itself once all other references have
+    // been released, in which case requestExitAndWait() safely returns WOULD_BLOCK.
+    requestExitAndWait();
+}
+
+}  // namespace android
diff --git a/services/audioflinger/PatchCommandThread.h b/services/audioflinger/PatchCommandThread.h
new file mode 100644
index 0000000..b7853f0
--- /dev/null
+++ b/services/audioflinger/PatchCommandThread.h
@@ -0,0 +1,102 @@
+/*
+**
+** Copyright 2022, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+    #error This header file should only be included from AudioFlinger.h
+#endif
+
+class Command;
+
+// Thread to execute create and release patch commands asynchronously. This is needed because
+// PatchPanel::createAudioPatch and releaseAudioPatch are executed from the audio policy service
+// with its mutex held, and effect management needs to call back into the audio policy service.
+class PatchCommandThread : public Thread {
+public:
+
+    enum {
+        CREATE_AUDIO_PATCH,
+        RELEASE_AUDIO_PATCH,
+    };
+
+    class PatchCommandListener : public virtual RefBase {
+    public:
+        virtual void onCreateAudioPatch(audio_patch_handle_t handle,
+                                        const PatchPanel::Patch& patch) = 0;
+        virtual void onReleaseAudioPatch(audio_patch_handle_t handle) = 0;
+    };
+
+    PatchCommandThread() : Thread(false /* canCallJava */) {}
+    ~PatchCommandThread() override;
+
+    void addListener(const sp<PatchCommandListener>& listener);
+
+    void createAudioPatch(audio_patch_handle_t handle, const PatchPanel::Patch& patch);
+    void releaseAudioPatch(audio_patch_handle_t handle);
+
+    // Thread virtuals
+    void onFirstRef() override;
+    bool threadLoop() override;
+
+    void exit();
+
+    void createAudioPatchCommand(audio_patch_handle_t handle,
+            const PatchPanel::Patch& patch);
+    void releaseAudioPatchCommand(audio_patch_handle_t handle);
+
+private:
+    class CommandData;
+
+    // Command type received from the PatchPanel
+    class Command: public RefBase {
+    public:
+        Command() = default;
+        Command(int command, const sp<CommandData>& data)
+            : mCommand(command), mData(data) {}
+
+        const int mCommand = -1;
+        const sp<CommandData> mData;
+    };
+
+    class CommandData: public RefBase {};
+
+    class CreateAudioPatchData : public CommandData {
+    public:
+        CreateAudioPatchData(audio_patch_handle_t handle, const PatchPanel::Patch& patch)
+            :   mHandle(handle), mPatch(patch) {}
+
+        const audio_patch_handle_t mHandle;
+        const PatchPanel::Patch mPatch;
+    };
+
+    class ReleaseAudioPatchData : public CommandData {
+    public:
+        ReleaseAudioPatchData(audio_patch_handle_t handle)
+            :   mHandle(handle) {}
+
+        audio_patch_handle_t mHandle;
+    };
+
+    void sendCommand(const sp<Command>& command);
+
+    std::string mThreadName;
+    std::mutex mLock;
+    std::condition_variable mWaitWorkCV;
+    std::deque<sp<Command>> mCommands GUARDED_BY(mLock); // list of pending commands
+
+    std::mutex mListenerLock;
+    std::vector<wp<PatchCommandListener>> mListeners GUARDED_BY(mListenerLock);
+};
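For reference, a minimal sketch of how a component hooks into the new PatchCommandThread, modeled on the DeviceEffectManager and MelReporter changes above. The class name is hypothetical and the bodies are illustrative only; the listener interface and addListener() call are the ones declared in this header.

    // Hypothetical listener, for illustration only -- not part of this change.
    class MyPatchObserver : public PatchCommandThread::PatchCommandListener {
    public:
        explicit MyPatchObserver(AudioFlinger& af) : mAudioFlinger(af) {}

        void onFirstRef() override {
            // PatchCommandThread stores listeners as wp<>, so registering here does not
            // extend the listener's lifetime.
            mAudioFlinger.mPatchCommandThread->addListener(this);
        }

        // Both callbacks are dispatched from PatchCommandThread::threadLoop(), i.e. outside
        // the audio policy / PatchPanel critical section that enqueued the command.
        void onCreateAudioPatch(audio_patch_handle_t handle,
                                const PatchPanel::Patch& patch) override { /* react to new patch */ }
        void onReleaseAudioPatch(audio_patch_handle_t handle) override { /* clean up */ }

    private:
        AudioFlinger& mAudioFlinger;
    };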
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 45dd258..3b428bb 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -313,12 +313,19 @@
                         patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
                         patch->sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
                 audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+                audio_source_t source = AUDIO_SOURCE_MIC;
+                // For telephony patches, propagate voice communication use case to record side
+                if (patch->num_sources == 2
+                        && patch->sources[1].ext.mix.usecase.stream
+                                == AUDIO_STREAM_VOICE_CALL) {
+                    source = AUDIO_SOURCE_VOICE_COMMUNICATION;
+                }
                 sp<ThreadBase> thread = mAudioFlinger.openInput_l(srcModule,
                                                                     &input,
                                                                     &config,
                                                                     device,
                                                                     address,
-                                                                    AUDIO_SOURCE_MIC,
+                                                                    source,
                                                                     flags,
                                                                     outputDevice,
                                                                     outputDeviceAddress);
@@ -442,7 +449,7 @@
     if (status == NO_ERROR) {
         *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
         newPatch.mHalHandle = halHandle;
-        mAudioFlinger.mDeviceEffectManager.createAudioPatch(*handle, newPatch);
+        mAudioFlinger.mPatchCommandThread->createAudioPatch(*handle, newPatch);
         if (insertedModule != AUDIO_MODULE_HANDLE_NONE) {
             addSoftwarePatchToInsertedModules(insertedModule, *handle, &newPatch.mAudioPatch);
         }
@@ -516,9 +523,14 @@
     audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
             mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
     audio_stream_type_t streamType = AUDIO_STREAM_PATCH;
+    audio_source_t source = AUDIO_SOURCE_DEFAULT;
     if (mAudioPatch.num_sources == 2 && mAudioPatch.sources[1].type == AUDIO_PORT_TYPE_MIX) {
         // "reuse one existing output mix" case
         streamType = mAudioPatch.sources[1].ext.mix.usecase.stream;
+        // For telephony patches, propagate voice communication use case to record side
+        if (streamType == AUDIO_STREAM_VOICE_CALL) {
+            source = AUDIO_SOURCE_VOICE_COMMUNICATION;
+        }
     }
     if (mPlayback.thread()->hasFastMixer()) {
         // Create a fast track if the playback thread has fast mixer to get better performance.
@@ -546,7 +558,8 @@
                                                  inChannelMask,
                                                  format,
                                                  frameCount,
-                                                 inputFlags);
+                                                 inputFlags,
+                                                 source);
     } else {
         // use a pseudo LCM between input and output framecount
         int playbackShift = __builtin_ctz(playbackFrameCount);
@@ -566,7 +579,9 @@
                                                  frameCount,
                                                  nullptr,
                                                  (size_t)0 /* bufferSize */,
-                                                 inputFlags);
+                                                 inputFlags,
+                                                 {} /* timeout */,
+                                                 source);
     }
     status = mRecord.checkTrack(tempRecordTrack.get());
     if (status != NO_ERROR) {
@@ -785,7 +800,7 @@
 void AudioFlinger::PatchPanel::erasePatch(audio_patch_handle_t handle) {
     mPatches.erase(handle);
     removeSoftwarePatchFromInsertedModules(handle);
-    mAudioFlinger.mDeviceEffectManager.releaseAudioPatch(handle);
+    mAudioFlinger.mPatchCommandThread->releaseAudioPatch(handle);
 }
 
 /* List connected audio ports and their attributes */
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index e8552c4..daec57e 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -87,6 +87,10 @@
                                     && (flags & AUDIO_INPUT_FLAG_HW_AV_SYNC) == 0;
                         }
 
+            using SinkMetadatas = std::vector<record_track_metadata_v7_t>;
+            using MetadataInserter = std::back_insert_iterator<SinkMetadatas>;
+            virtual void    copyMetadataTo(MetadataInserter& backInserter) const;
+
 private:
     friend class AudioFlinger;  // for mState
 
@@ -134,7 +138,8 @@
                 void *buffer,
                 size_t bufferSize,
                 audio_input_flags_t flags,
-                const Timeout& timeout = {});
+                const Timeout& timeout = {},
+                audio_source_t source = AUDIO_SOURCE_DEFAULT);
     virtual             ~PatchRecord();
 
     virtual Source* getSource() { return nullptr; }
@@ -166,7 +171,8 @@
                         audio_channel_mask_t channelMask,
                         audio_format_t format,
                         size_t frameCount,
-                        audio_input_flags_t flags);
+                        audio_input_flags_t flags,
+                        audio_source_t source = AUDIO_SOURCE_DEFAULT);
 
     Source* getSource() override { return static_cast<Source*>(this); }
 
diff --git a/services/audioflinger/TEST_MAPPING b/services/audioflinger/TEST_MAPPING
index 9aff137..5d3fb0a 100644
--- a/services/audioflinger/TEST_MAPPING
+++ b/services/audioflinger/TEST_MAPPING
@@ -1,5 +1,5 @@
 {
-  "postsubmit": [
+  "presubmit": [
     {
       "name": "CtsNativeMediaAAudioTestCases",
       "options" : [
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b021c5e..a566e19 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -17,7 +17,7 @@
 
 
 #define LOG_TAG "AudioFlinger"
-//#define LOG_NDEBUG 0
+// #define LOG_NDEBUG 0
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
 #include "Configuration.h"
@@ -44,6 +44,7 @@
 #include <private/media/AudioTrackShared.h>
 #include <private/android_filesystem_config.h>
 #include <audio_utils/Balance.h>
+#include <audio_utils/MelProcessor.h>
 #include <audio_utils/Metadata.h>
 #include <audio_utils/channels.h>
 #include <audio_utils/mono_blend.h>
@@ -791,6 +792,7 @@
                                             (CreateAudioPatchConfigEventData *)event->mData.get();
             event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
             const DeviceTypeSet newDevices = getDeviceTypes();
+            configChanged = oldDevices != newDevices;
             mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                     dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                     dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
@@ -801,6 +803,7 @@
                                             (ReleaseAudioPatchConfigEventData *)event->mData.get();
             event->mStatus = releaseAudioPatch_l(data->mHandle);
             const DeviceTypeSet newDevices = getDeviceTypes();
+            configChanged = oldDevices != newDevices;
             mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                     dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                     dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
@@ -2742,7 +2745,7 @@
             // Unlock due to VibratorService will lock for this call and will
             // call Tracks.mute/unmute which also require thread's lock.
             mLock.unlock();
-            const int intensity = AudioFlinger::onExternalVibrationStart(
+            const os::HapticScale intensity = AudioFlinger::onExternalVibrationStart(
                     track->getExternalVibration());
             std::optional<media::AudioVibratorInfo> vibratorInfo;
             {
@@ -2752,7 +2755,7 @@
                 vibratorInfo = std::move(mAudioFlinger->getDefaultVibratorInfo_l());
             }
             mLock.lock();
-            track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
+            track->setHapticIntensity(intensity);
             if (vibratorInfo) {
                 track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
             }
@@ -3333,6 +3336,7 @@
         }
         ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
         ATRACE_END();
+
         if (framesWritten > 0) {
             bytesWritten = framesWritten * mFrameSize;
 #ifdef TEE_SINK
@@ -3341,6 +3345,11 @@
         } else {
             bytesWritten = framesWritten;
         }
+
+        auto processor = mMelProcessor.load();
+        if (processor) {
+            processor->process((char *)mSinkBuffer + offset, bytesWritten);
+        }
     // otherwise use the HAL / AudioStreamOut directly
     } else {
         // Direct output and offload threads
@@ -3377,6 +3386,21 @@
     return bytesWritten;
 }
 
+void AudioFlinger::PlaybackThread::startMelComputation(
+        const sp<audio_utils::MelProcessor::MelCallback>& callback)
+{
+    ALOGV("%s: creating new mel processor for thread %d", __func__, id());
+    mMelProcessor = sp<audio_utils::MelProcessor>::make(mSampleRate,
+                                                        mChannelCount,
+                                                        mFormat,
+                                                        callback);
+}
+
+void AudioFlinger::PlaybackThread::stopMelComputation() {
+    ALOGV("%s: stopping mel processor for thread %d", __func__, id());
+    mMelProcessor = nullptr;
+}
+
 void AudioFlinger::PlaybackThread::threadLoop_drain()
 {
     bool supportsDrain = false;
@@ -3435,9 +3459,15 @@
     mActiveSleepTimeUs = activeSleepTimeUs();
     mIdleSleepTimeUs = idleSleepTimeUs();
 
+    mStandbyDelayNs = AudioFlinger::mStandbyTimeInNsecs;
+    // Shorten standby delay on VOIP RX output to avoid delayed routing updates
+    // after a call due to call end tone.
+    if (mOutput != nullptr && (mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
+        const nsecs_t NS_PER_MS = 1000000;
+        mStandbyDelayNs = std::min(mStandbyDelayNs, latency_l() * NS_PER_MS);
+    }
     // make sure standby delay is not too short when connected to an A2DP sink to avoid
     // truncating audio when going to standby.
-    mStandbyDelayNs = AudioFlinger::mStandbyTimeInNsecs;
     if (!Intersection(outDeviceTypes(),  getAudioDeviceOutAllA2dpSet()).empty()) {
         if (mStandbyDelayNs < kDefaultStandbyTimeInNsecs) {
             mStandbyDelayNs = kDefaultStandbyTimeInNsecs;
@@ -4497,7 +4527,7 @@
             // When the track is stop, set the haptic intensity as MUTE
             // for the HapticGenerator effect.
             if (chain != nullptr) {
-                chain->setHapticIntensity_l(track->id(), static_cast<int>(os::HapticScale::MUTE));
+                chain->setHapticIntensity_l(track->id(), os::HapticScale::MUTE);
             }
         }
     }
@@ -6143,8 +6173,10 @@
 // ----------------------------------------------------------------------------
 
 AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
-        AudioStreamOut* output, audio_io_handle_t id, ThreadBase::type_t type, bool systemReady)
+        AudioStreamOut* output, audio_io_handle_t id, ThreadBase::type_t type, bool systemReady,
+        const audio_offload_info_t& offloadInfo)
     :   PlaybackThread(audioFlinger, output, id, type, systemReady)
+    , mOffloadInfo(offloadInfo)
 {
     setMasterBalance(audioFlinger->getMasterBalance_l());
 }
@@ -6423,7 +6455,8 @@
                 // fill a buffer, then remove it from active list.
                 // Only consider last track started for mixer state control
                 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
-                if (--(track->mRetryCount) <= 0) {
+                if (!isTunerStream()  // tuner streams remain active in underrun
+                        && --(track->mRetryCount) <= 0) {
                     if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
                         track->mRetryCount = kMaxTrackRetriesOffload;
                     } else {
@@ -6467,6 +6500,7 @@
             (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
         status_t result = mOutput->stream->pause();
         ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
+        doHwResume = !doHwPause;  // resume if pause is due to flush.
     }
     if (mFlushPending) {
         flushHw_l();
@@ -6786,8 +6820,9 @@
 
 // ----------------------------------------------------------------------------
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
-        AudioStreamOut* output, audio_io_handle_t id, bool systemReady)
-    :   DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady),
+        AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
+        const audio_offload_info_t& offloadInfo)
+    :   DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady, offloadInfo),
         mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
@@ -7006,7 +7041,8 @@
                 // No buffers for this track. Give it a few chances to
                 // fill a buffer, then remove it from active list.
                 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
-                if (--(track->mRetryCount) <= 0) {
+                if (!isTunerStream()  // tuner streams remain active in underrun
+                        && --(track->mRetryCount) <= 0) {
                     if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
                         track->mRetryCount = kMaxTrackRetriesOffload;
                     } else {
@@ -7035,6 +7071,7 @@
     if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
         status_t result = mOutput->stream->pause();
         ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
+        doHwResume = !doHwPause;  // resume if pause is due to flush.
     }
     if (mFlushPending) {
         flushHw_l();
@@ -7354,6 +7391,27 @@
     if (status != INVALID_OPERATION) {
         updateHalSupportedLatencyModes_l();
     }
+
+    // update priority if specified.
+    constexpr int32_t kRTPriorityMin = 1;
+    constexpr int32_t kRTPriorityMax = 3;
+    const int32_t priorityBoost =
+            property_get_int32("audio.spatializer.priority", kRTPriorityMin);
+    if (priorityBoost >= kRTPriorityMin && priorityBoost <= kRTPriorityMax) {
+        const pid_t pid = getpid();
+        const pid_t tid = getTid();
+
+        if (tid == -1) {
+            // Unusual: PlaybackThread::onFirstRef() should set the threadLoop running.
+            ALOGW("%s: audio.spatializer.priority %d ignored, thread not running",
+                    __func__, priorityBoost);
+        } else {
+            ALOGD("%s: audio.spatializer.priority %d, allowing real time for pid %d  tid %d",
+                    __func__, priorityBoost, pid, tid);
+            sendPrioConfigEvent_l(pid, tid, priorityBoost, false /*forApp*/);
+            stream()->setHalThreadPriority(priorityBoost);
+        }
+    }
 }
 
 status_t AudioFlinger::SpatializerThread::createAudioPatch_l(const struct audio_patch *patch,
@@ -8348,8 +8406,6 @@
     audio_input_flags_t inputFlags = mInput->flags;
     audio_input_flags_t requestedFlags = *flags;
     uint32_t sampleRate;
-    AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
-            attributionSource);
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
@@ -8364,7 +8420,7 @@
     }
 
     if (maxSharedAudioHistoryMs != 0) {
-        if (!captureHotwordAllowed(checkedAttributionSource)) {
+        if (!captureHotwordAllowed(attributionSource)) {
             lStatus = PERMISSION_DENIED;
             goto Exit;
         }
@@ -8485,16 +8541,16 @@
         Mutex::Autolock _l(mLock);
         int32_t startFrames = -1;
         if (!mSharedAudioPackageName.empty()
-                && mSharedAudioPackageName == checkedAttributionSource.packageName
+                && mSharedAudioPackageName == attributionSource.packageName
                 && mSharedAudioSessionId == sessionId
-                && captureHotwordAllowed(checkedAttributionSource)) {
+                && captureHotwordAllowed(attributionSource)) {
             startFrames = mSharedAudioStartFrames;
         }
 
         track = new RecordTrack(this, client, attr, sampleRate,
                       format, channelMask, frameCount,
                       nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
-                      checkedAttributionSource, *flags, TrackBase::TYPE_DEFAULT, portId,
+                      attributionSource, *flags, TrackBase::TYPE_DEFAULT, portId,
                       startFrames);
 
         lStatus = track->initCheck();
@@ -8794,21 +8850,9 @@
         return; // nothing to do
     }
     StreamInHalInterface::SinkMetadata metadata;
+    auto backInserter = std::back_inserter(metadata.tracks);
     for (const sp<RecordTrack> &track : mActiveTracks) {
-        // Do not forward PatchRecord metadata to audio HAL
-        if (track->isPatchTrack()) {
-            continue;
-        }
-        // No track is invalid as this is called after prepareTrack_l in the same critical section
-        record_track_metadata_v7_t trackMetadata;
-        trackMetadata.base = {
-                .source = track->attributes().source,
-                .gain = 1, // capture tracks do not have volumes
-        };
-        trackMetadata.channel_mask = track->channelMask(),
-        strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
-
-        metadata.tracks.push_back(trackMetadata);
+        track->copyMetadataTo(backInserter);
     }
     mInput->stream->updateSinkMetadata(metadata);
 }
@@ -10282,19 +10326,22 @@
 
 void AudioFlinger::MmapThread::checkInvalidTracks_l()
 {
+    sp<MmapStreamCallback> callback;
     for (const sp<MmapTrack> &track : mActiveTracks) {
         if (track->isInvalid()) {
-            sp<MmapStreamCallback> callback = mCallback.promote();
-            if (callback != 0) {
-                mLock.unlock();
-                callback->onTearDown(track->portId());
-                mLock.lock();
-            } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
-                ALOGW("Could not notify MMAP stream tear down: no onTearDown callback!");
+            callback = mCallback.promote();
+            if (callback == nullptr &&  mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+                ALOGW("Could not notify MMAP stream tear down: no onRoutingChanged callback!");
                 mNoCallbackWarningCount++;
             }
+            break;
         }
     }
+    if (callback != 0) {
+        mLock.unlock();
+        callback->onRoutingChanged(AUDIO_PORT_HANDLE_NONE);
+        mLock.lock();
+    }
 }
 
 void AudioFlinger::MmapThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b477d65..a7028ee 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1091,6 +1091,10 @@
                     return INVALID_OPERATION;
                 }
 
+                void startMelComputation(
+                        const sp<audio_utils::MelProcessor::MelCallback>& callback);
+                void stopMelComputation();
+
 protected:
     // updated by readOutputParameters_l()
     size_t                          mNormalFrameCount;  // normal mixer and effects
@@ -1190,6 +1194,8 @@
     audio_channel_mask_t            mMixerChannelMask = AUDIO_CHANNEL_NONE;
 
 private:
+    mediautils::atomic_sp<audio_utils::MelProcessor> mMelProcessor;
+
     // mMasterMute is in both PlaybackThread and in AudioFlinger.  When a
     // PlaybackThread needs to find out if master-muted, it checks it's local
     // copy rather than the one in AudioFlinger.  This optimization saves a lock.
@@ -1546,8 +1552,9 @@
 public:
 
     DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                       audio_io_handle_t id, bool systemReady)
-        : DirectOutputThread(audioFlinger, output, id, DIRECT, systemReady) { }
+                       audio_io_handle_t id, bool systemReady,
+                       const audio_offload_info_t& offloadInfo)
+        : DirectOutputThread(audioFlinger, output, id, DIRECT, systemReady, offloadInfo) { }
 
     virtual                 ~DirectOutputThread();
 
@@ -1579,11 +1586,14 @@
 
     virtual     void        onAddNewTrack_l();
 
+    const       audio_offload_info_t mOffloadInfo;
     bool mVolumeShaperActive = false;
 
     DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                       audio_io_handle_t id, ThreadBase::type_t type, bool systemReady);
+                       audio_io_handle_t id, ThreadBase::type_t type, bool systemReady,
+                       const audio_offload_info_t& offloadInfo);
     void processVolume_l(Track *track, bool lastTrack);
+    bool isTunerStream() const { return (mOffloadInfo.content_id > 0); }
 
     // prepareTracks_l() tells threadLoop_mix() the name of the single active track
     sp<Track>               mActiveTrack;
@@ -1621,7 +1631,8 @@
 public:
 
     OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                  audio_io_handle_t id, bool systemReady);
+                  audio_io_handle_t id, bool systemReady,
+                  const audio_offload_info_t& offloadInfo);
     virtual                 ~OffloadThread() {};
                 void        flushHw_l() override;
 
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index cfb296c..3837ad2 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -530,10 +530,7 @@
             id, attr.flags);
         return nullptr;
     }
-
-    AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
-            attributionSource);
-    return new OpPlayAudioMonitor(checkedAttributionSource, attr.usage, id);
+    return new OpPlayAudioMonitor(attributionSource, attr.usage, id);
 }
 
 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
@@ -1481,7 +1478,7 @@
         }
     }
 
-    metadata.channel_mask = mChannelMask,
+    metadata.channel_mask = mChannelMask;
     strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
     *backInserter++ = metadata;
 }
@@ -2060,7 +2057,6 @@
 {
     Buffer *pInBuffer;
     Buffer inBuffer;
-    bool outputBufferFull = false;
     inBuffer.frameCount = frames;
     inBuffer.raw = data;
 
@@ -2090,7 +2086,6 @@
                 ALOGV("%s(%d): thread %d no more output buffers; status %d",
                         __func__, mId,
                         (int)mThreadIoHandle, status);
-                outputBufferFull = true;
                 break;
             }
             uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
@@ -2786,6 +2781,25 @@
     }
 }
 
+void AudioFlinger::RecordThread::RecordTrack::copyMetadataTo(MetadataInserter& backInserter) const
+{
+
+    // Do not forward PatchRecord metadata with unspecified audio source
+    if (mAttr.source == AUDIO_SOURCE_DEFAULT) {
+        return;
+    }
+
+    // No track is invalid as this is called after prepareTrack_l in the same critical section
+    record_track_metadata_v7_t metadata;
+    metadata.base = {
+            .source = mAttr.source,
+            .gain = 1, // capture tracks do not have volumes
+    };
+    metadata.channel_mask = mChannelMask;
+    strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
+
+    *backInserter++ = metadata;
+}
 
 // ----------------------------------------------------------------------------
 #undef LOG_TAG
@@ -2799,9 +2813,10 @@
                                                      void *buffer,
                                                      size_t bufferSize,
                                                      audio_input_flags_t flags,
-                                                     const Timeout& timeout)
+                                                     const Timeout& timeout,
+                                                     audio_source_t source)
     :   RecordTrack(recordThread, NULL,
-                audio_attributes_t{} /* currently unused for patch track */,
+                audio_attributes_t{ .source = source },
                 sampleRate, format, channelMask, frameCount,
                 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
                 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
@@ -2912,9 +2927,10 @@
         audio_channel_mask_t channelMask,
         audio_format_t format,
         size_t frameCount,
-        audio_input_flags_t flags)
+        audio_input_flags_t flags,
+        audio_source_t source)
         : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
-                nullptr /*buffer*/, 0 /*bufferSize*/, flags),
+                nullptr /*buffer*/, 0 /*bufferSize*/, flags, {} /* timeout */, source),
           mPatchRecordAudioBufferProvider(*this),
           mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
           mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
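As a side note to the Tracks.cpp changes above, here is a simplified, self-contained sketch (hypothetical types, not the AOSP ones) of the two behaviors introduced: patch records now carry a real capture source in their attributes, and RecordTrack metadata forwarding skips tracks whose source is still unspecified:

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for audio_source_t / audio_attributes_t.
    enum Source { SOURCE_DEFAULT = 0, SOURCE_MIC = 1 };
    struct Attributes { Source source; };
    struct Metadata { Source source; float gain; };

    // Mirrors the shape of RecordTrack::copyMetadataTo(): do not forward
    // metadata for tracks whose source was never set.
    static void copyMetadataTo(const Attributes& attr, std::vector<Metadata>& out) {
        if (attr.source == SOURCE_DEFAULT) {
            return;
        }
        out.push_back({attr.source, /*gain=*/1.0f});  // capture tracks do not have volumes
    }

    int main() {
        std::vector<Metadata> sink;
        copyMetadataTo(Attributes{SOURCE_DEFAULT}, sink);  // skipped
        copyMetadataTo(Attributes{SOURCE_MIC}, sink);      // forwarded
        std::printf("forwarded %zu metadata entries\n", sink.size());  // prints 1
        return 0;
    }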
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index c4c27e8..b85382e 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -307,13 +307,13 @@
     virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
 
     virtual status_t getProductStrategyFromAudioAttributes(
-            const AudioAttributes &aa, product_strategy_t &productStrategy,
+            const audio_attributes_t &aa, product_strategy_t &productStrategy,
             bool fallbackOnDefault) = 0;
 
     virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) = 0;
 
     virtual status_t getVolumeGroupFromAudioAttributes(
-            const AudioAttributes &aa, volume_group_t &volumeGroup, bool fallbackOnDefault) = 0;
+            const audio_attributes_t &aa, volume_group_t &volumeGroup, bool fallbackOnDefault) = 0;
 
     virtual bool     isCallScreenModeSupported() = 0;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index afffcd5..54a143c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -72,13 +72,16 @@
      */
     status_t getOutputForAttr(const audio_attributes_t& attributes,
                               const audio_config_base_t& config,
-                              uid_t uid, audio_output_flags_t flags,
+                              uid_t uid,
+                              audio_session_t session,
+                              audio_output_flags_t flags,
                               sp<AudioPolicyMix> &primaryMix,
                               std::vector<sp<AudioPolicyMix>> *secondaryMixes);
 
-    sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
+    sp<DeviceDescriptor> getDeviceAndMixForInputSource(const audio_attributes_t& attributes,
                                                        const DeviceVector &availableDeviceTypes,
                                                        uid_t uid,
+                                                       audio_session_t session,
                                                        sp<AudioPolicyMix> *policyMix) const;
 
     /**
@@ -124,11 +127,13 @@
     void dump(String8 *dst) const;
 
 private:
-    enum class MixMatchStatus { MATCH, NO_MATCH };
-    MixMatchStatus mixMatch(const AudioMix* mix, size_t mixIndex,
+    bool mixMatch(const AudioMix* mix, size_t mixIndex,
                             const audio_attributes_t& attributes,
                             const audio_config_base_t& config,
-                            uid_t uid);
+                            uid_t uid,
+                            audio_session_t session);
 };
 
+std::optional<std::string> extractAddressFromAudioAttributes(const audio_attributes_t& attr);
+
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 65fab43..003dcaf 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -18,6 +18,9 @@
 //#define LOG_NDEBUG 0
 
 #include <algorithm>
+#include <iterator>
+#include <optional>
+#include <regex>
 #include "AudioPolicyMix.h"
 #include "TypeConverter.h"
 #include "HwModule.h"
@@ -28,6 +31,70 @@
 namespace android {
 namespace {
 
+bool matchAddressToTags(const audio_attributes_t& attr, const String8& addr) {
+    std::optional<std::string> tagAddress = extractAddressFromAudioAttributes(attr);
+    return tagAddress.has_value() && tagAddress->compare(addr.c_str()) == 0;
+}
+
+// Returns true if the criterion matches.
+// Exclude criteria are handled in the same way as positive
+// ones - only the condition itself is evaluated (the function returns
+// the same result for both RULE_MATCH_X and RULE_EXCLUDE_X).
+bool isCriterionMatched(const AudioMixMatchCriterion& criterion,
+                        const audio_attributes_t& attr,
+                        const uid_t uid,
+                        const audio_session_t session) {
+    uint32_t ruleWithoutExclusion = criterion.mRule & ~RULE_EXCLUSION_MASK;
+    switch(ruleWithoutExclusion) {
+        case RULE_MATCH_ATTRIBUTE_USAGE:
+            return criterion.mValue.mUsage == attr.usage;
+        case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
+            return criterion.mValue.mSource == attr.source;
+        case RULE_MATCH_UID:
+            return criterion.mValue.mUid == uid;
+        case RULE_MATCH_USERID:
+            {
+                userid_t userId = multiuser_get_user_id(uid);
+                return criterion.mValue.mUserId == userId;
+            }
+        case RULE_MATCH_AUDIO_SESSION_ID:
+            return criterion.mValue.mAudioSessionId == session;
+    }
+    ALOGE("Encountered invalid mix rule 0x%x", criterion.mRule);
+    return false;
+}
+
+// Returns true if the vector of criteria is matched:
+// - If any exclude criterion is matched, the vector does not match.
+// - Otherwise, for each 'dimension' of positive rule present
+//   (usage, capture preset, uid, userid...) at least one rule must match
+//   for the vector to match.
+bool areMixCriteriaMatched(const std::vector<AudioMixMatchCriterion>& criteria,
+                           const audio_attributes_t& attr,
+                           const uid_t uid,
+                           const audio_session_t session) {
+    // If any of the exclusion criteria are matched the mix doesn't match.
+    auto isMatchingExcludeCriterion = [&](const AudioMixMatchCriterion& c) {
+        return c.isExcludeCriterion() && isCriterionMatched(c, attr, uid, session);
+    };
+    if (std::any_of(criteria.begin(), criteria.end(), isMatchingExcludeCriterion)) {
+        return false;
+    }
+
+    uint32_t presentPositiveRules = 0; // Bitmask of all present positive criteria.
+    uint32_t matchedPositiveRules = 0; // Bitmask of all matched positive criteria.
+    for (const auto& criterion : criteria) {
+        if (criterion.isExcludeCriterion()) {
+            continue;
+        }
+        presentPositiveRules |= criterion.mRule;
+        if (isCriterionMatched(criterion, attr, uid, session)) {
+            matchedPositiveRules |= criterion.mRule;
+        }
+    }
+    return presentPositiveRules == matchedPositiveRules;
+}
+
 // Consistency checks: for each "dimension" of rules (usage, uid...), we can
 // only have MATCH rules, or EXCLUDE rules in each dimension, not a combination.
 bool areMixCriteriaConsistent(const std::vector<AudioMixMatchCriterion>& criteria) {
@@ -95,6 +162,9 @@
         case RULE_MATCH_USERID:
             ruleValue = std::to_string(criterion.mValue.mUserId);
             break;
+        case RULE_MATCH_AUDIO_SESSION_ID:
+            ruleValue = std::to_string(criterion.mValue.mAudioSessionId);
+            break;
         default:
             unknownRule = true;
         }
@@ -186,7 +256,8 @@
 }
 
 status_t AudioPolicyMixCollection::getOutputForAttr(
-        const audio_attributes_t& attributes, const audio_config_base_t& config, uid_t uid,
+        const audio_attributes_t& attributes, const audio_config_base_t& config, const uid_t uid,
+        const audio_session_t session,
         audio_output_flags_t flags,
         sp<AudioPolicyMix> &primaryMix,
         std::vector<sp<AudioPolicyMix>> *secondaryMixes)
@@ -212,7 +283,7 @@
             continue; // Primary output already found
         }
 
-        if(mixMatch(policyMix.get(), i, attributes, config, uid) == MixMatchStatus::NO_MATCH) {
+        if(!mixMatch(policyMix.get(), i, attributes, config, uid, session)) {
             ALOGV("%s: Mix %zu: does not match", __func__, i);
             continue; // skip the mix
         }
@@ -230,9 +301,9 @@
     return NO_ERROR;
 }
 
-AudioPolicyMixCollection::MixMatchStatus AudioPolicyMixCollection::mixMatch(
-        const AudioMix* mix, size_t mixIndex, const audio_attributes_t& attributes,
-        const audio_config_base_t& config, uid_t uid) {
+bool AudioPolicyMixCollection::mixMatch(const AudioMix* mix, size_t mixIndex,
+    const audio_attributes_t& attributes, const audio_config_base_t& config,
+    uid_t uid, audio_session_t session) {
 
     if (mix->mMixType == MIX_TYPE_PLAYERS) {
         // Loopback render mixes are created from a public API and thus restricted
@@ -242,20 +313,20 @@
                   attributes.usage == AUDIO_USAGE_MEDIA ||
                   attributes.usage == AUDIO_USAGE_GAME ||
                   attributes.usage == AUDIO_USAGE_VOICE_COMMUNICATION)) {
-                return MixMatchStatus::NO_MATCH;
+                return false;
             }
             auto hasFlag = [](auto flags, auto flag) { return (flags & flag) == flag; };
             if (hasFlag(attributes.flags, AUDIO_FLAG_NO_SYSTEM_CAPTURE)) {
-                return MixMatchStatus::NO_MATCH;
+                return false;
             }
 
             if (attributes.usage == AUDIO_USAGE_VOICE_COMMUNICATION) {
                 if (!mix->mVoiceCommunicationCaptureAllowed) {
-                    return MixMatchStatus::NO_MATCH;
+                    return false;
                 }
             } else if (!mix->mAllowPrivilegedMediaPlaybackCapture &&
                 hasFlag(attributes.flags, AUDIO_FLAG_NO_MEDIA_PROJECTION)) {
-                return MixMatchStatus::NO_MATCH;
+                return false;
             }
         }
 
@@ -265,132 +336,22 @@
             !((audio_is_linear_pcm(config.format) && audio_is_linear_pcm(mix->mFormat.format)) ||
               (config.format == mix->mFormat.format)) &&
               config.format != AUDIO_CONFIG_BASE_INITIALIZER.format) {
-            return MixMatchStatus::NO_MATCH;
+            return false;
         }
 
-        int userId = (int) multiuser_get_user_id(uid);
-
-        // TODO if adding more player rules (currently only 2), make rule handling "generic"
-        //      as there is no difference in the treatment of usage- or uid-based rules
-        bool hasUsageMatchRules = false;
-        bool hasUsageExcludeRules = false;
-        bool usageMatchFound = false;
-        bool usageExclusionFound = false;
-
-        bool hasUidMatchRules = false;
-        bool hasUidExcludeRules = false;
-        bool uidMatchFound = false;
-        bool uidExclusionFound = false;
-
-        bool hasUserIdExcludeRules = false;
-        bool userIdExclusionFound = false;
-        bool hasUserIdMatchRules = false;
-        bool userIdMatchFound = false;
-
-
-        bool hasAddrMatch = false;
-
-        // iterate over all mix criteria to list what rules this mix contains
-        for (size_t j = 0; j < mix->mCriteria.size(); j++) {
-            ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
-                    mixIndex, j, mix->mCriteria.size());
-
-            // if there is an address match, prioritize that match
-            if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                    strncmp(attributes.tags + strlen("addr="),
-                            mix->mDeviceAddress.string(),
-                            AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                hasAddrMatch = true;
-                break;
-            }
-
-            switch (mix->mCriteria[j].mRule) {
-            case RULE_MATCH_ATTRIBUTE_USAGE:
-                ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
-                                            mix->mCriteria[j].mValue.mUsage);
-                hasUsageMatchRules = true;
-                if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
-                    // found one match against all allowed usages
-                    usageMatchFound = true;
-                }
-                break;
-            case RULE_EXCLUDE_ATTRIBUTE_USAGE:
-                ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
-                        mix->mCriteria[j].mValue.mUsage);
-                hasUsageExcludeRules = true;
-                if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
-                    // found this usage is to be excluded
-                    usageExclusionFound = true;
-                }
-                break;
-            case RULE_MATCH_UID:
-                ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
-                hasUidMatchRules = true;
-                if (mix->mCriteria[j].mValue.mUid == uid) {
-                    // found one UID match against all allowed UIDs
-                    uidMatchFound = true;
-                }
-                break;
-            case RULE_EXCLUDE_UID:
-                ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
-                hasUidExcludeRules = true;
-                if (mix->mCriteria[j].mValue.mUid == uid) {
-                    // found this UID is to be excluded
-                    uidExclusionFound = true;
-                }
-                break;
-            case RULE_MATCH_USERID:
-                ALOGV("\tmix has RULE_MATCH_USERID for userId %d",
-                    mix->mCriteria[j].mValue.mUserId);
-                hasUserIdMatchRules = true;
-                if (mix->mCriteria[j].mValue.mUserId == userId) {
-                    // found one userId match against all allowed userIds
-                    userIdMatchFound = true;
-                }
-                break;
-            case RULE_EXCLUDE_USERID:
-                ALOGV("\tmix has RULE_EXCLUDE_USERID for userId %d",
-                    mix->mCriteria[j].mValue.mUserId);
-                hasUserIdExcludeRules = true;
-                if (mix->mCriteria[j].mValue.mUserId == userId) {
-                    // found this userId is to be excluded
-                    userIdExclusionFound = true;
-                }
-                break;
-            default:
-                break;
-            }
-
-
-            if ((hasUsageExcludeRules && usageExclusionFound)
-                    || (hasUidExcludeRules && uidExclusionFound)
-                    || (hasUserIdExcludeRules && userIdExclusionFound)) {
-                break; // stop iterating on criteria because an exclusion was found (will fail)
-            }
-        }//iterate on mix criteria
-
-        // determine if exiting on success (or implicit failure as desc is 0)
-        if (hasAddrMatch ||
-                !((hasUsageExcludeRules && usageExclusionFound) ||
-                  (hasUsageMatchRules && !usageMatchFound)  ||
-                  (hasUidExcludeRules && uidExclusionFound) ||
-                  (hasUidMatchRules && !uidMatchFound) ||
-                  (hasUserIdExcludeRules && userIdExclusionFound) ||
-                  (hasUserIdMatchRules && !userIdMatchFound))) {
-            ALOGV("\tgetOutputForAttr will use mix %zu", mixIndex);
-            return MixMatchStatus::MATCH;
+        // if there is an address match, prioritize that match
+        if (matchAddressToTags(attributes, mix->mDeviceAddress)
+                || areMixCriteriaMatched(mix->mCriteria, attributes, uid, session)) {
+            ALOGV("\tgetOutputForAttr will use mix %zu", mixIndex);
+            return true;
         }
-
     } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
         if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
-                strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                strncmp(attributes.tags + strlen("addr="),
-                        mix->mDeviceAddress.string(),
-                        AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-            return MixMatchStatus::MATCH;
+            matchAddressToTags(attributes, mix->mDeviceAddress)) {
+            return true;
         }
     }
-    return MixMatchStatus::NO_MATCH;
+    return false;
 }
 
 sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForOutput(
@@ -411,9 +372,10 @@
 }
 
 sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
-        audio_source_t inputSource,
+        const audio_attributes_t& attributes,
         const DeviceVector &availDevices,
         uid_t uid,
+        audio_session_t session,
         sp<AudioPolicyMix> *policyMix) const
 {
     for (size_t i = 0; i < size(); i++) {
@@ -421,28 +383,17 @@
         if (mix->mMixType != MIX_TYPE_RECORDERS) {
             continue;
         }
-        for (size_t j = 0; j < mix->mCriteria.size(); j++) {
-            if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mValue.mSource == inputSource) ||
-               (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mValue.mSource != inputSource) ||
-               (RULE_MATCH_UID == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mValue.mUid == uid) ||
-               (RULE_EXCLUDE_UID == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mValue.mUid != uid)) {
-                // assuming PolicyMix only for remote submix for input
-                // so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
-                audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-                auto mixDevice =
-                        availDevices.getDevice(device, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
+        if (areMixCriteriaMatched(mix->mCriteria, attributes, uid, session)) {
+            // Assuming PolicyMix only for remote submix for input
+            // so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX.
+            auto mixDevice = availDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                    mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
                 if (mixDevice != nullptr) {
                     if (policyMix != nullptr) {
                         *policyMix = mix;
                     }
                     return mixDevice;
                 }
-                break;
-            }
         }
     }
     return nullptr;
@@ -451,14 +402,14 @@
 status_t AudioPolicyMixCollection::getInputMixForAttr(
         audio_attributes_t attr, sp<AudioPolicyMix> *policyMix)
 {
-    if (strncmp(attr.tags, "addr=", strlen("addr=")) != 0) {
+    std::optional<std::string> address = extractAddressFromAudioAttributes(attr);
+    if (!address.has_value()) {
         return BAD_VALUE;
     }
-    String8 address(attr.tags + strlen("addr="));
 
 #ifdef LOG_NDEBUG
     ALOGV("getInputMixForAttr looking for address %s for source %d\n  mixes available:",
-            address.string(), attr.source);
+            address->c_str(), attr.source);
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioPolicyMix> audioPolicyMix = itemAt(i);
         ALOGV("\tmix %zu address=%s", i, audioPolicyMix->mDeviceAddress.string());
@@ -468,20 +419,20 @@
     size_t index;
     for (index = 0; index < size(); index++) {
         const sp<AudioPolicyMix>& registeredMix = itemAt(index);
-        if (registeredMix->mDeviceAddress.compare(address) == 0) {
+        if (address->compare(registeredMix->mDeviceAddress.c_str()) == 0) {
             ALOGD("getInputMixForAttr found addr=%s dev=0x%x",
                     registeredMix->mDeviceAddress.string(), registeredMix->mDeviceType);
             break;
         }
     }
     if (index == size()) {
-        ALOGW("getInputMixForAttr() no policy for address %s", address.string());
+        ALOGW("getInputMixForAttr() no policy for address %s", address->c_str());
         return BAD_VALUE;
     }
     const sp<AudioPolicyMix> audioPolicyMix = itemAt(index);
 
     if (audioPolicyMix->mMixType != MIX_TYPE_PLAYERS) {
-        ALOGW("getInputMixForAttr() bad policy mix type for address %s", address.string());
+        ALOGW("getInputMixForAttr() bad policy mix type for address %s", address->c_str());
         return BAD_VALUE;
     }
     if (policyMix != nullptr) {
@@ -681,4 +632,14 @@
     }
 }
 
+std::optional<std::string> extractAddressFromAudioAttributes(const audio_attributes_t& attr) {
+    static const std::regex addrTagRegex("addr=([^;]+)");
+
+    std::cmatch match;
+    if (std::regex_search(attr.tags, match, addrTagRegex)) {
+        return match[1].str();
+    }
+    return std::nullopt;
+}
+
 }; //namespace android
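To make the new tag handling concrete, here is a minimal standalone sketch (illustrative only; the sample tag strings and main() are not from the patch) of the "addr=" extraction that extractAddressFromAudioAttributes() performs with the regex above:

    #include <cstdio>
    #include <optional>
    #include <regex>
    #include <string>

    // Same pattern as the patch: capture everything after "addr=" up to the
    // next ';' separator in the attributes' tags string.
    static std::optional<std::string> extractAddress(const char* tags) {
        static const std::regex addrTagRegex("addr=([^;]+)");
        std::cmatch match;
        if (std::regex_search(tags, match, addrTagRegex)) {
            return match[1].str();
        }
        return std::nullopt;
    }

    int main() {
        // Hypothetical tag strings, for illustration only.
        const char* withAddr = "oem=1;addr=bus0_media_out;other=2";
        const char* withoutAddr = "oem=1;other=2";

        if (auto addr = extractAddress(withAddr)) {
            std::printf("address: %s\n", addr->c_str());  // prints "bus0_media_out"
        }
        std::printf("has address: %d\n", extractAddress(withoutAddr).has_value());  // prints 0
        return 0;
    }

matchAddressToTags() in the patch then simply compares the extracted value against the mix's device address.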
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index c5b3546..8a44547 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -56,10 +56,12 @@
     MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_UID),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_USERID),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_AUDIO_SESSION_ID),
     MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_USAGE),
     MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET),
     MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_UID),
     MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_USERID),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_AUDIO_SESSION_ID),
     TERMINATOR
 };
 
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 2aa2f9a..e8251e3 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -24,7 +24,7 @@
 #include <vector>
 
 #include <HandleGenerator.h>
-#include <media/AudioAttributes.h>
+#include <media/VolumeGroupAttributes.h>
 #include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioPolicy.h>
@@ -43,20 +43,14 @@
 class ProductStrategy : public virtual RefBase, private HandleGenerator<uint32_t>
 {
 private:
-    struct AudioAttributes {
-        audio_stream_type_t mStream = AUDIO_STREAM_DEFAULT;
-        volume_group_t mVolumeGroup = VOLUME_GROUP_NONE;
-        audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
-    };
-
-    using AudioAttributesVector = std::vector<AudioAttributes>;
+    using VolumeGroupAttributesVector = std::vector<VolumeGroupAttributes>;
 
 public:
     ProductStrategy(const std::string &name);
 
-    void addAttributes(const AudioAttributes &audioAttributes);
+    void addAttributes(const VolumeGroupAttributes &volumeGroupAttributes);
 
-    std::vector<android::AudioAttributes> listAudioAttributes() const;
+    std::vector<android::VolumeGroupAttributes> listVolumeGroupAttributes() const;
 
     std::string getName() const { return mName; }
     AttributesVector getAudioAttributes() const;
@@ -105,7 +99,7 @@
 private:
     std::string mName;
 
-    AudioAttributesVector mAttributesVector;
+    VolumeGroupAttributesVector mAttributesVector;
 
     product_strategy_t mId;
 
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 99507ee..9b78758 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -145,7 +145,7 @@
     };
     auto addSupportedAttributesToGroup = [](auto &group, auto &volumeGroup, auto &strategy) {
         for (const auto &attr : group.attributesVect) {
-            strategy->addAttributes({group.stream, volumeGroup->getId(), attr});
+            strategy->addAttributes({volumeGroup->getId(), group.stream, attr});
             volumeGroup->addSupportedAttributes(attr);
         }
     };
@@ -284,7 +284,7 @@
     for (const auto &iter : mProductStrategies) {
         const auto &productStrategy = iter.second;
         strategies.push_back(
-        {productStrategy->getName(), productStrategy->listAudioAttributes(),
+        {productStrategy->getName(), productStrategy->listVolumeGroupAttributes(),
          productStrategy->getId()});
     }
     return NO_ERROR;
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 60b16ac..548a20d 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include <EngineConfig.h>
+
 #include <system/audio.h>
 
 namespace android {
@@ -138,19 +140,19 @@
  * For compatibility with the APM volume config file, the volume group name is the stream type.
  */
 const engineConfig::ProductStrategies gOrderedSystemStrategies = {
-    {"rerouting",
+    {"STRATEGY_REROUTING",
      {
          {AUDIO_STREAM_REROUTING, "AUDIO_STREAM_REROUTING",
           {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VIRTUAL_SOURCE, AUDIO_SOURCE_DEFAULT,
-            AUDIO_FLAG_NONE, ""}}
+            AUDIO_FLAG_NONE, AUDIO_TAG_APM_RESERVED_INTERNAL}}
          }
      },
     },
-    {"patch",
+    {"STRATEGY_PATCH",
      {
          {AUDIO_STREAM_PATCH, "AUDIO_STREAM_PATCH",
           {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
-            AUDIO_FLAG_NONE, ""}}
+            AUDIO_FLAG_NONE, AUDIO_TAG_APM_RESERVED_INTERNAL}}
          }
      },
     }
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index fbfcf72..c104c97 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -36,16 +36,16 @@
 {
 }
 
-void ProductStrategy::addAttributes(const AudioAttributes &audioAttributes)
+void ProductStrategy::addAttributes(const VolumeGroupAttributes &volumeGroupAttributes)
 {
-    mAttributesVector.push_back(audioAttributes);
+    mAttributesVector.push_back(volumeGroupAttributes);
 }
 
-std::vector<android::AudioAttributes> ProductStrategy::listAudioAttributes() const
+std::vector<android::VolumeGroupAttributes> ProductStrategy::listVolumeGroupAttributes() const
 {
-    std::vector<android::AudioAttributes> androidAa;
+    std::vector<android::VolumeGroupAttributes> androidAa;
     for (const auto &attr : mAttributesVector) {
-        androidAa.push_back({attr.mVolumeGroup, attr.mStream, attr.mAttributes});
+        androidAa.push_back({attr.getGroupId(), attr.getStreamType(), attr.getAttributes()});
     }
     return androidAa;
 }
@@ -54,7 +54,7 @@
 {
     AttributesVector attrVector;
     for (const auto &attrGroup : mAttributesVector) {
-        attrVector.push_back(attrGroup.mAttributes);
+        attrVector.push_back(attrGroup.getAttributes());
     }
     if (not attrVector.empty()) {
         return attrVector;
@@ -66,7 +66,7 @@
 {
     return std::find_if(begin(mAttributesVector), end(mAttributesVector),
                         [&attr](const auto &supportedAttr) {
-        return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr);
+        return AudioProductStrategy::attributesMatches(supportedAttr.getAttributes(), attr);
     }) != end(mAttributesVector);
 }
 
@@ -75,11 +75,11 @@
 {
     const auto &iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
                                    [&attr](const auto &supportedAttr) {
-        return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr); });
+        return AudioProductStrategy::attributesMatches(supportedAttr.getAttributes(), attr); });
     if (iter == end(mAttributesVector)) {
         return AUDIO_STREAM_DEFAULT;
     }
-    audio_stream_type_t streamType = iter->mStream;
+    audio_stream_type_t streamType = iter->getStreamType();
     ALOGW_IF(streamType == AUDIO_STREAM_DEFAULT,
              "%s: Strategy %s supporting attributes %s has not stream type associated"
              "fallback on MUSIC. Do not use stream volume API", __func__, mName.c_str(),
@@ -91,23 +91,23 @@
 {
     const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
                                    [&streamType](const auto &supportedAttr) {
-        return supportedAttr.mStream == streamType; });
-    return iter != end(mAttributesVector) ? iter->mAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+        return supportedAttr.getStreamType() == streamType; });
+    return iter != end(mAttributesVector) ? iter->getAttributes() : AUDIO_ATTRIBUTES_INITIALIZER;
 }
 
 bool ProductStrategy::isDefault() const
 {
     return std::find_if(begin(mAttributesVector), end(mAttributesVector), [](const auto &attr) {
-        return attr.mAttributes == defaultAttr; }) != end(mAttributesVector);
+        return attr.getAttributes() == defaultAttr; }) != end(mAttributesVector);
 }
 
 StreamTypeVector ProductStrategy::getSupportedStreams() const
 {
     StreamTypeVector streams;
     for (const auto &supportedAttr : mAttributesVector) {
-        if (std::find(begin(streams), end(streams), supportedAttr.mStream) == end(streams) &&
-                supportedAttr.mStream != AUDIO_STREAM_DEFAULT) {
-            streams.push_back(supportedAttr.mStream);
+        if (std::find(begin(streams), end(streams), supportedAttr.getStreamType())
+                == end(streams) && supportedAttr.getStreamType() != AUDIO_STREAM_DEFAULT) {
+            streams.push_back(supportedAttr.getStreamType());
         }
     }
     return streams;
@@ -117,14 +117,14 @@
 {
     return std::find_if(begin(mAttributesVector), end(mAttributesVector),
                         [&streamType](const auto &supportedAttr) {
-        return supportedAttr.mStream == streamType; }) != end(mAttributesVector);
+        return supportedAttr.getStreamType() == streamType; }) != end(mAttributesVector);
 }
 
 volume_group_t ProductStrategy::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
 {
     for (const auto &supportedAttr : mAttributesVector) {
-        if (AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr)) {
-            return supportedAttr.mVolumeGroup;
+        if (AudioProductStrategy::attributesMatches(supportedAttr.getAttributes(), attr)) {
+            return supportedAttr.getGroupId();
         }
     }
     return VOLUME_GROUP_NONE;
@@ -133,8 +133,8 @@
 volume_group_t ProductStrategy::getVolumeGroupForStreamType(audio_stream_type_t stream) const
 {
     for (const auto &supportedAttr : mAttributesVector) {
-        if (supportedAttr.mStream == stream) {
-            return supportedAttr.mVolumeGroup;
+        if (supportedAttr.getStreamType() == stream) {
+            return supportedAttr.getGroupId();
         }
     }
     return VOLUME_GROUP_NONE;
@@ -143,8 +143,10 @@
 volume_group_t ProductStrategy::getDefaultVolumeGroup() const
 {
     const auto &iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
-                                    [](const auto &attr) {return attr.mAttributes == defaultAttr;});
-    return iter != end(mAttributesVector) ? iter->mVolumeGroup : VOLUME_GROUP_NONE;
+                                    [](const auto &attr) {
+        return attr.getAttributes() == defaultAttr;
+    });
+    return iter != end(mAttributesVector) ? iter->getGroupId() : VOLUME_GROUP_NONE;
 }
 
 void ProductStrategy::dump(String8 *dst, int spaces) const
@@ -155,11 +157,11 @@
                        deviceLiteral.c_str(), mDeviceAddress.c_str());
 
     for (const auto &attr : mAttributesVector) {
-        dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mVolumeGroup,
-                          android::toString(attr.mStream).c_str());
+        dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.getGroupId(),
+                          android::toString(attr.getStreamType()).c_str());
         dst->appendFormat("%*s Attributes: ", spaces + 3, "");
-        std::string attStr =
-                attr.mAttributes == defaultAttr ? "{ Any }" : android::toString(attr.mAttributes);
+        std::string attStr = attr.getAttributes() == defaultAttr ?
+                "{ Any }" : android::toString(attr.getAttributes());
         dst->appendFormat("%s\n", attStr.c_str());
     }
 }
diff --git a/services/audiopolicy/engine/common/src/VolumeCurve.cpp b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
index 8aa4b08..fccbc60 100644
--- a/services/audiopolicy/engine/common/src/VolumeCurve.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
@@ -52,7 +52,7 @@
             volIdx = volIndexMin;
         } else {
             // This would result in a divide-by-zero below
-            ALOG_ASSERT(volIndexmin != volIndexMax, "Invalid volume index range & value: 0");
+            ALOG_ASSERT(volIndexMin != volIndexMax, "Invalid volume index range & value: 0");
             return NAN;
         }
     } else {
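For context on the assert fixed above, a minimal sketch (not the actual VolumeCurve interpolation) of why equal min and max indices must be rejected: the index is normalized by the span of the range, so volIndexMin == volIndexMax would divide by zero:

    #include <cassert>
    #include <cstdio>

    // Hypothetical normalization, for illustration only: map a volume index
    // into [0, 1] relative to the curve's index range.
    static float normalizeVolumeIndex(int volIdx, int volIndexMin, int volIndexMax) {
        // Guard the division below, like the (now correctly spelled) ALOG_ASSERT above.
        assert(volIndexMin != volIndexMax && "Invalid volume index range & value: 0");
        return static_cast<float>(volIdx - volIndexMin)
                / static_cast<float>(volIndexMax - volIndexMin);
    }

    int main() {
        std::printf("%.2f\n", normalizeVolumeIndex(5, 0, 10));  // prints 0.50
        return 0;
    }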
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
index 8036eea..4de16c5 100644
--- a/services/audiopolicy/engine/config/include/EngineConfig.h
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -25,6 +25,12 @@
 struct _xmlNode;
 struct _xmlDoc;
 
+/**
+ * AudioAttributes custom tag to identify internal strategies, whose volumes are exclusively
+ * controlled by AudioPolicyManager
+ */
+#define AUDIO_TAG_APM_RESERVED_INTERNAL "reserved_internal_strategy"
+
 namespace android {
 namespace engineConfig {
 
diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
index 518f86e..70d25fc 100644
--- a/services/audiopolicy/engine/interface/EngineInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -173,10 +173,11 @@
      * @param[out] mix to be used if a mix has been installed for the given audio attributes.
      * @return selected input device for the audio attributes, may be null if error.
      */
-    virtual sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
-                                                             uid_t uid = 0,
-                                                             sp<AudioPolicyMix> *mix = nullptr)
-                                                             const = 0;
+    virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
+            const audio_attributes_t &attr,
+            uid_t uid = 0,
+            audio_session_t session = AUDIO_SESSION_NONE,
+            sp<AudioPolicyMix> *mix = nullptr) const = 0;
 
     /**
      * Get the legacy stream type for a given audio attributes.
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 3d74920..9d53017 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -315,6 +315,7 @@
 
 sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
                                                          uid_t uid,
+                                                         audio_session_t session,
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -333,9 +334,10 @@
         return device;
     }
 
-    device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+    device = policyMixes.getDeviceAndMixForInputSource(attr,
                                                        availableInputDevices,
                                                        uid,
+                                                       session,
                                                        mix);
     if (device != nullptr) {
         return device;
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 4b559f0..6ac20cd 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -63,6 +63,7 @@
 
     sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
                                                      uid_t uid = 0,
+                                                     audio_session_t session = AUDIO_SESSION_NONE,
                                                      sp<AudioPolicyMix> *mix = nullptr)
                                                      const override;
 
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 924b4ad..d96ae21 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -778,6 +778,7 @@
 
 sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
                                                          uid_t uid,
+                                                         audio_session_t session,
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -797,9 +798,10 @@
         return device;
     }
 
-    device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+    device = policyMixes.getDeviceAndMixForInputSource(attr,
                                                        availableInputDevices,
                                                        uid,
+                                                       session,
                                                        mix);
     if (device != nullptr) {
         return device;
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 1efdf24..ab556ee 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -64,6 +64,7 @@
 
     sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
                                                      uid_t uid = 0,
+                                                     audio_session_t session = AUDIO_SESSION_NONE,
                                                      sp<AudioPolicyMix> *mix = nullptr)
                                                      const override;
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index c029719..db2fd23 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -792,7 +792,8 @@
     ALOGV("%s between source %s and sink %s", __func__,
             srcDevice->toString().c_str(), sinkDevice->toString().c_str());
     auto callTxSourceClientPortId = PolicyAudioPort::getNextUniqueId();
-    const audio_attributes_t aa = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
+    const auto aa = mEngine->getAttributesForStreamType(AUDIO_STREAM_VOICE_CALL);
+
     struct audio_port_config source = {};
     srcDevice->toAudioPortConfig(&source);
     mCallTxSourceClient = new InternalSourceClientDescriptor(
@@ -1169,8 +1170,8 @@
         .channel_mask = config->channel_mask,
         .format = config->format,
     };
-    status = mPolicyMixes.getOutputForAttr(*resultAttr, clientConfig, uid, *flags, primaryMix,
-                                           secondaryMixes);
+    status = mPolicyMixes.getOutputForAttr(*resultAttr, clientConfig, uid, session, *flags,
+                                           primaryMix, secondaryMixes);
     if (status != OK) {
         return status;
     }
@@ -2504,7 +2505,7 @@
     *inputType = API_INPUT_INVALID;
 
     if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX &&
-            strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) {
+            extractAddressFromAudioAttributes(attributes).has_value()) {
         status = mPolicyMixes.getInputMixForAttr(attributes, &policyMix);
         if (status != NO_ERROR) {
             ALOGW("%s could not find input mix for attr %s",
@@ -2532,7 +2533,7 @@
         } else {
             // Prevent from storing invalid requested device id in clients
             requestedDeviceId = AUDIO_PORT_HANDLE_NONE;
-            device = mEngine->getInputDeviceForAttributes(attributes, uid, &policyMix);
+            device = mEngine->getInputDeviceForAttributes(attributes, uid, session, &policyMix);
             ALOGV_IF(device != nullptr, "%s found device type is 0x%X",
                 __FUNCTION__, device->type());
         }
@@ -2939,7 +2940,8 @@
             bool close = false;
             for (const auto& client : input->clientsList()) {
                 sp<DeviceDescriptor> device =
-                    mEngine->getInputDeviceForAttributes(client->attributes(), client->uid());
+                    mEngine->getInputDeviceForAttributes(client->attributes(), client->uid(),
+                                                         client->session());
                 if (!input->supportedDevices().contains(device)) {
                     close = true;
                     break;
@@ -4509,7 +4511,7 @@
                 // In case of HW bridge, it is a workaround. The mixPort used is the one declared
                 // in config XML to reach the sink so that it can be declared as available.
                 audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-                sp<SwAudioOutputDescriptor> outputDesc = nullptr;
+                sp<SwAudioOutputDescriptor> outputDesc;
                 if (!sourceDesc->isInternal()) {
                     // take care of dynamic routing for SwOutput selection,
                     audio_attributes_t attributes = sourceDesc->attributes();
@@ -4584,7 +4586,8 @@
                         audio_port_config srcMixPortConfig = {};
                         outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
                         // for volume control, we may need a valid stream
-                        srcMixPortConfig.ext.mix.usecase.stream = !sourceDesc->isInternal() ?
+                        srcMixPortConfig.ext.mix.usecase.stream =
+                            (!sourceDesc->isInternal() || isCallTxAudioSource(sourceDesc)) ?
                                     mEngine->getStreamTypeForAttributes(sourceDesc->attributes()) :
                                     AUDIO_STREAM_PATCH;
                         patchBuilder.addSource(srcMixPortConfig);
@@ -6334,7 +6337,7 @@
             }
             sp<AudioPolicyMix> primaryMix;
             status_t status = mPolicyMixes.getOutputForAttr(client->attributes(), client->config(),
-                    client->uid(), client->flags(), primaryMix, nullptr);
+                    client->uid(), client->session(), client->flags(), primaryMix, nullptr);
             if (status != OK) {
                 continue;
             }
@@ -6447,7 +6450,7 @@
             sp<AudioPolicyMix> primaryMix;
             std::vector<sp<AudioPolicyMix>> secondaryMixes;
             status_t status = mPolicyMixes.getOutputForAttr(client->attributes(), client->config(),
-                    client->uid(), client->flags(), primaryMix, &secondaryMixes);
+                    client->uid(), client->session(), client->flags(), primaryMix, &secondaryMixes);
             std::vector<sp<SwAudioOutputDescriptor>> secondaryDescs;
             for (auto &secondaryMix : secondaryMixes) {
                 sp<SwAudioOutputDescriptor> outputDesc = secondaryMix->getOutput();
@@ -6655,20 +6658,23 @@
     // a null sp<>, causing the patch on the input stream to be released.
     audio_attributes_t attributes;
     uid_t uid;
+    audio_session_t session;
     sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
     if (topClient != nullptr) {
         attributes = topClient->attributes();
         uid = topClient->uid();
+        session = topClient->session();
     } else {
         attributes = { .source = AUDIO_SOURCE_DEFAULT };
         uid = 0;
+        session = AUDIO_SESSION_NONE;
     }
 
     if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
         attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     }
     if (attributes.source != AUDIO_SOURCE_DEFAULT) {
-        device = mEngine->getInputDeviceForAttributes(attributes, uid);
+        device = mEngine->getInputDeviceForAttributes(attributes, uid, session);
     }
 
     return device;
@@ -7863,7 +7869,7 @@
     // audio routing, only used for duplication for playback capture)
     sp<AudioPolicyMix> policyMix;
     status_t status = mPolicyMixes.getOutputForAttr(attr, AUDIO_CONFIG_BASE_INITIALIZER,
-            0 /*uid unknown here*/, AUDIO_OUTPUT_FLAG_NONE, policyMix,
+            0 /*uid unknown here*/, AUDIO_SESSION_NONE, AUDIO_OUTPUT_FLAG_NONE, policyMix,
             nullptr /* secondaryMixes */);
     if (status != OK) {
         return status;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 74460c7..a3600a0 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -356,11 +356,10 @@
         }
 
         virtual status_t getProductStrategyFromAudioAttributes(
-                const AudioAttributes &aa, product_strategy_t &productStrategy,
+                const audio_attributes_t &aa, product_strategy_t &productStrategy,
                 bool fallbackOnDefault)
         {
-            productStrategy = mEngine->getProductStrategyForAttributes(
-                    aa.getAttributes(), fallbackOnDefault);
+            productStrategy = mEngine->getProductStrategyForAttributes(aa, fallbackOnDefault);
             return (fallbackOnDefault && productStrategy == PRODUCT_STRATEGY_NONE) ?
                     BAD_VALUE : NO_ERROR;
         }
@@ -371,10 +370,9 @@
         }
 
         virtual status_t getVolumeGroupFromAudioAttributes(
-                const AudioAttributes &aa, volume_group_t &volumeGroup, bool fallbackOnDefault)
+                const audio_attributes_t &aa, volume_group_t &volumeGroup, bool fallbackOnDefault)
         {
-            volumeGroup = mEngine->getVolumeGroupForAttributes(
-                        aa.getAttributes(), fallbackOnDefault);
+            volumeGroup = mEngine->getVolumeGroupForAttributes(aa, fallbackOnDefault);
             return (fallbackOnDefault && volumeGroup == VOLUME_GROUP_NONE) ?
                     BAD_VALUE : NO_ERROR;
         }
@@ -639,6 +637,10 @@
             return mCallRxSourceClient != nullptr && source == mCallRxSourceClient;
         }
 
+        bool isCallTxAudioSource(const sp<SourceClientDescriptor> &source) {
+            return mCallTxSourceClient != nullptr && source == mCallTxSourceClient;
+        }
+
         void connectTelephonyRxAudioSource();
 
         void disconnectTelephonyAudioSource(sp<SourceClientDescriptor> &clientDesc);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index b15b61d..b576b6d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -352,31 +352,20 @@
     ALOGV("%s()", __func__);
     Mutex::Autolock _l(mLock);
 
-    // TODO b/182392553: refactor or remove
-    AttributionSourceState adjAttributionSource = attributionSource;
-    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (!isAudioServerOrMediaServerUid(callingUid) || attributionSource.uid == -1) {
-        int32_t callingUidAidl = VALUE_OR_RETURN_BINDER_STATUS(
-            legacy2aidl_uid_t_int32_t(callingUid));
-        ALOGW_IF(attributionSource.uid != -1 && attributionSource.uid != callingUidAidl,
-                "%s uid %d tried to pass itself off as %d", __func__,
-                callingUidAidl, attributionSource.uid);
-        adjAttributionSource.uid = callingUidAidl;
-    }
     if (!mPackageManager.allowPlaybackCapture(VALUE_OR_RETURN_BINDER_STATUS(
-        aidl2legacy_int32_t_uid_t(adjAttributionSource.uid)))) {
+        aidl2legacy_int32_t_uid_t(attributionSource.uid)))) {
         attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
     }
     if (((attr.flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
-            && !bypassInterruptionPolicyAllowed(adjAttributionSource)) {
+            && !bypassInterruptionPolicyAllowed(attributionSource)) {
         attr.flags = static_cast<audio_flags_mask_t>(
                 attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
     }
 
     if (attr.content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
-        if (!accessUltrasoundAllowed(adjAttributionSource)) {
+        if (!accessUltrasoundAllowed(attributionSource)) {
             ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
-                    __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+                    __func__, attributionSource.uid, attributionSource.pid);
             return binderStatusFromStatusT(PERMISSION_DENIED);
         }
     }
@@ -386,7 +375,7 @@
     bool isSpatialized = false;
     status_t result = mAudioPolicyManager->getOutputForAttr(&attr, &output, session,
                                                             &stream,
-                                                            adjAttributionSource,
+                                                            attributionSource,
                                                             &config,
                                                             &flags, &selectedDeviceId, &portId,
                                                             &secondaryOutputs,
@@ -401,20 +390,20 @@
             break;
         case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
             if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
-                && !callAudioInterceptionAllowed(adjAttributionSource)) {
+                && !callAudioInterceptionAllowed(attributionSource)) {
                 ALOGE("%s() permission denied: call redirection not allowed for uid %d",
-                    __func__, adjAttributionSource.uid);
+                    __func__, attributionSource.uid);
                 result = PERMISSION_DENIED;
-            } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+            } else if (!modifyPhoneStateAllowed(attributionSource)) {
                 ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
-                    __func__, adjAttributionSource.uid);
+                    __func__, attributionSource.uid);
                 result = PERMISSION_DENIED;
             }
             break;
         case AudioPolicyInterface::API_OUT_MIX_PLAYBACK:
-            if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
+            if (!modifyAudioRoutingAllowed(attributionSource)) {
                 ALOGE("%s() permission denied: modify audio routing not allowed for uid %d",
-                    __func__, adjAttributionSource.uid);
+                    __func__, attributionSource.uid);
                 result = PERMISSION_DENIED;
             }
             break;
@@ -427,7 +416,7 @@
 
     if (result == NO_ERROR) {
         sp<AudioPlaybackClient> client =
-                new AudioPlaybackClient(attr, output, adjAttributionSource, session,
+                new AudioPlaybackClient(attr, output, attributionSource, session,
                     portId, selectedDeviceId, stream, isSpatialized);
         mAudioPlaybackClients.add(portId, client);
 
@@ -620,33 +609,8 @@
         return binderStatusFromStatusT(BAD_VALUE);
     }
 
-    // Make sure attribution source represents the current caller
-    AttributionSourceState adjAttributionSource = attributionSource;
-    // TODO b/182392553: refactor or remove
-    bool updatePid = (attributionSource.pid == -1);
-    const uid_t callingUid =IPCThreadState::self()->getCallingUid();
-    const uid_t currentUid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(
-            attributionSource.uid));
-    if (!isAudioServerOrMediaServerUid(callingUid)) {
-        ALOGW_IF(currentUid != (uid_t)-1 && currentUid != callingUid,
-                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid,
-                currentUid);
-        adjAttributionSource.uid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_uid_t_int32_t(
-                callingUid));
-        updatePid = true;
-    }
-
-    if (updatePid) {
-        const int32_t callingPid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_pid_t_int32_t(
-            IPCThreadState::self()->getCallingPid()));
-        ALOGW_IF(attributionSource.pid != -1 && attributionSource.pid != callingPid,
-                 "%s uid %d pid %d tried to pass itself off as pid %d",
-                 __func__, adjAttributionSource.uid, callingPid, attributionSource.pid);
-        adjAttributionSource.pid = callingPid;
-    }
-
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
-            adjAttributionSource)));
+            attributionSource)));
 
     // check calling permissions.
     // Capturing from the following sources does not require permission RECORD_AUDIO
@@ -657,17 +621,17 @@
     // type is API_INPUT_MIX_EXT_POLICY_REROUTE and by AudioService if a media projection
     // is used and input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK
     // - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed()
-    if (!(recordingAllowed(adjAttributionSource, inputSource)
+    if (!(recordingAllowed(attributionSource, inputSource)
             || inputSource == AUDIO_SOURCE_FM_TUNER
             || inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
             || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for %s",
-                __func__, adjAttributionSource.toString().c_str());
+                __func__, attributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
-    bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
-    bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+    bool canCaptureOutput = captureAudioOutputAllowed(attributionSource);
+    bool canInterceptCallAudio = callAudioInterceptionAllowed(attributionSource);
     bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
              || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
              || inputSource == AUDIO_SOURCE_VOICE_CALL;
@@ -681,11 +645,11 @@
     }
     if (inputSource == AUDIO_SOURCE_FM_TUNER
         && !canCaptureOutput
-        && !captureTunerAudioInputAllowed(adjAttributionSource)) {
+        && !captureTunerAudioInputAllowed(attributionSource)) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
-    bool canCaptureHotword = captureHotwordAllowed(adjAttributionSource);
+    bool canCaptureHotword = captureHotwordAllowed(attributionSource);
     if ((inputSource == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -693,14 +657,14 @@
     if (((flags & AUDIO_INPUT_FLAG_HW_HOTWORD) != 0)
             && !canCaptureHotword) {
         ALOGE("%s: permission denied: hotword mode not allowed"
-              " for uid %d pid %d", __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+              " for uid %d pid %d", __func__, attributionSource.uid, attributionSource.pid);
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
     if (attr.source == AUDIO_SOURCE_ULTRASOUND) {
-        if (!accessUltrasoundAllowed(adjAttributionSource)) {
+        if (!accessUltrasoundAllowed(attributionSource)) {
             ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
-                    __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+                    __func__, attributionSource.uid, attributionSource.pid);
             return binderStatusFromStatusT(PERMISSION_DENIED);
         }
     }
@@ -715,7 +679,7 @@
             AutoCallerClear acc;
             // the audio_in_acoustics_t parameter is ignored by get_input()
             status = mAudioPolicyManager->getInputForAttr(&attr, &input, riid, session,
-                                                          adjAttributionSource, &config,
+                                                          attributionSource, &config,
                                                           flags, &selectedDeviceId,
                                                           &inputType, &portId);
 
@@ -744,7 +708,7 @@
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
-                if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+                if (!(modifyAudioRoutingAllowed(attributionSource)
                         || ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
                             && canInterceptCallAudio))) {
                     ALOGE("%s permission denied for remote submix capture", __func__);
@@ -770,7 +734,7 @@
         }
 
         sp<AudioRecordClient> client = new AudioRecordClient(attr, input, session, portId,
-                                                             selectedDeviceId, adjAttributionSource,
+                                                             selectedDeviceId, attributionSource,
                                                              canCaptureOutput, canCaptureHotword,
                                                              mOutputCommandThread);
         mAudioRecordClients.add(portId, client);
@@ -1176,12 +1140,12 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesEx& attrAidl,
+Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesInternal& attrAidl,
                                                    bool forVolume,
                                                    std::vector<AudioDevice>* _aidl_return)
 {
-    AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioAttributesEx_AudioAttributes(attrAidl));
+    audio_attributes_t aa = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     AudioDeviceTypeAddrVector devices;
 
     if (mAudioPolicyManager == NULL) {
@@ -1190,8 +1154,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
-            mAudioPolicyManager->getDevicesForAttributes(
-                    aa.getAttributes(), &devices, forVolume)));
+            mAudioPolicyManager->getDevicesForAttributes(aa, &devices, forVolume)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<std::vector<AudioDevice>>(devices,
                                                        legacy2aidl_AudioDeviceTypeAddress));
@@ -2084,9 +2047,10 @@
 }
 
 Status AudioPolicyService::getProductStrategyFromAudioAttributes(
-        const media::AudioAttributesEx& aaAidl, bool fallbackOnDefault, int32_t* _aidl_return) {
-    AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioAttributesEx_AudioAttributes(aaAidl));
+        const media::AudioAttributesInternal& aaAidl,
+        bool fallbackOnDefault, int32_t* _aidl_return) {
+    audio_attributes_t aa = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(aaAidl));
     product_strategy_t productStrategy;
 
     if (mAudioPolicyManager == NULL) {
@@ -2117,9 +2081,10 @@
 }
 
 Status AudioPolicyService::getVolumeGroupFromAudioAttributes(
-        const media::AudioAttributesEx& aaAidl, bool fallbackOnDefault, int32_t* _aidl_return) {
-    AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioAttributesEx_AudioAttributes(aaAidl));
+        const media::AudioAttributesInternal& aaAidl,
+        bool fallbackOnDefault, int32_t* _aidl_return) {
+    audio_attributes_t aa = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(aaAidl));
     volume_group_t volumeGroup;
 
     if (mAudioPolicyManager == NULL) {
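The hunks above drop the media::AudioAttributesEx parcelable and the AudioAttributes wrapper (and its getAttributes() accessor) in favour of media::AudioAttributesInternal converted directly to a legacy audio_attributes_t, which is then handed straight to the policy manager. A minimal sketch of that convert-or-bail-out flow, using stand-in types instead of the real conversion utilities (aidl2legacy_AudioAttributesInternal_audio_attributes_t and VALUE_OR_RETURN_BINDER_STATUS):

    #include <cstdio>
    #include <optional>

    // Stand-in for the legacy audio_attributes_t payload.
    struct LegacyAttributes { int usage; };

    // Stand-in for the aidl2legacy_* conversion helper: empty optional on failure.
    static std::optional<LegacyAttributes> convertFromAidl(int aidlUsage) {
        if (aidlUsage < 0) return std::nullopt;
        return LegacyAttributes{aidlUsage};
    }

    // Shape of the new getDevicesForAttributes / get*FromAudioAttributes flow:
    // convert, return an error status on failure, otherwise use the legacy struct.
    static int getDevicesForAttributesSketch(int aidlUsage) {
        auto converted = convertFromAidl(aidlUsage);
        if (!converted.has_value()) {
            return -1;  // analogue of VALUE_OR_RETURN_BINDER_STATUS propagating an error
        }
        // mAudioPolicyManager->getDevicesForAttributes(*converted, &devices, forVolume);
        return converted->usage;
    }

    int main() {
        std::printf("%d %d\n", getDevicesForAttributesSketch(1),
                getDevicesForAttributesSketch(-1));
        return 0;
    }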
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 92e1b6b..6f23fe7 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -1006,10 +1006,11 @@
         //     AND is on TOP or latest started
         //     AND there is no active privacy sensitive capture or call
         //             OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+        bool allowSensitiveCapture =
+            !isSensitiveActive || isTopOrLatestSensitive || current->canCaptureOutput;
         bool allowCapture = !isAssistantOnTop
                 && (isTopOrLatestActive || isTopOrLatestSensitive)
-                && !(isSensitiveActive
-                    && !(isTopOrLatestSensitive || current->canCaptureOutput))
+                && allowSensitiveCapture
                 && canCaptureIfInCallOrCommunication(current);
 
         if (!current->hasOp()) {
@@ -1032,7 +1033,7 @@
                 if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
                     allowCapture = true;
                 }
-            } else if (!(isSensitiveActive && !current->canCaptureOutput)
+            } else if (allowSensitiveCapture
                     && canCaptureIfInCallOrCommunication(current)) {
                 if (isTopOrLatestAssistant
                     && (source == AUDIO_SOURCE_VOICE_RECOGNITION
@@ -1053,7 +1054,7 @@
                 if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
                     allowCapture = true;
                 }
-            } else if (!(isSensitiveActive && !current->canCaptureOutput)
+            } else if (allowSensitiveCapture
                         && canCaptureIfInCallOrCommunication(current)) {
                 if ((source == AUDIO_SOURCE_VOICE_RECOGNITION) || (source == AUDIO_SOURCE_HOTWORD))
                 {
@@ -1068,7 +1069,7 @@
             //     OR
             //         Is on TOP AND the source is VOICE_RECOGNITION or HOTWORD
             if (!isAssistantOnTop
-                    && !(isSensitiveActive && !current->canCaptureOutput)
+                    && allowSensitiveCapture
                     && canCaptureIfInCallOrCommunication(current)) {
                 allowCapture = true;
             }
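In the capture-concurrency hunks above, the first rewritten condition is a pure refactor: the new allowSensitiveCapture flag is the De Morgan expansion of the old negated expression. The three later sites previously tested only !(isSensitiveActive && !current->canCaptureOutput), so with the shared flag they now additionally allow the top or latest sensitive client. A small stand-alone check of the first equivalence:

    #include <cassert>

    // Exhaustively verify that
    //   !(isSensitiveActive && !(isTopOrLatestSensitive || canCaptureOutput))
    // equals
    //   !isSensitiveActive || isTopOrLatestSensitive || canCaptureOutput
    // which is how allowSensitiveCapture is defined above.
    int main() {
        for (int s = 0; s <= 1; ++s) {          // isSensitiveActive
            for (int t = 0; t <= 1; ++t) {      // isTopOrLatestSensitive
                for (int c = 0; c <= 1; ++c) {  // current->canCaptureOutput
                    const bool oldForm = !(s && !(t || c));
                    const bool allowSensitiveCapture = !s || t || c;
                    assert(oldForm == allowSensitiveCapture);
                }
            }
        }
        return 0;
    }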
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 5c37f99..3a7fffa 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -134,7 +134,7 @@
                                                   int32_t* _aidl_return) override;
     binder::Status getStrategyForStream(AudioStreamType stream,
                                         int32_t* _aidl_return) override;
-    binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
+    binder::Status getDevicesForAttributes(const media::AudioAttributesInternal& attr,
                                            bool forVolume,
                                            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
@@ -224,12 +224,12 @@
     binder::Status isUltrasoundSupported(bool* _aidl_return) override;
     binder::Status listAudioProductStrategies(
             std::vector<media::AudioProductStrategy>* _aidl_return) override;
-    binder::Status getProductStrategyFromAudioAttributes(const media::AudioAttributesEx& aa,
+    binder::Status getProductStrategyFromAudioAttributes(const media::AudioAttributesInternal& aa,
                                                          bool fallbackOnDefault,
                                                          int32_t* _aidl_return) override;
     binder::Status listAudioVolumeGroups(
             std::vector<media::AudioVolumeGroup>* _aidl_return) override;
-    binder::Status getVolumeGroupFromAudioAttributes(const media::AudioAttributesEx& aa,
+    binder::Status getVolumeGroupFromAudioAttributes(const media::AudioAttributesInternal& aa,
                                                      bool fallbackOnDefault,
                                                      int32_t* _aidl_return) override;
     binder::Status setRttEnabled(bool enabled) override;
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index af40d1f..d9b856b 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -741,6 +741,17 @@
     msg->post();
 }
 
+void Spatializer::resetEngineHeadPose_l() {
+    ALOGV("%s mEngine %p", __func__, mEngine.get());
+    if (mEngine == nullptr) {
+        return;
+    }
+    const std::vector<float> headToStage(6, 0.0);
+    setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
+    setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+            std::vector<SpatializerHeadTrackingMode>{SpatializerHeadTrackingMode::DISABLED});
+}
+
 void Spatializer::onHeadToStagePoseMsg(const std::vector<float>& headToStage) {
     ALOGV("%s", __func__);
     sp<media::ISpatializerHeadTrackingCallback> callback;
@@ -792,8 +803,12 @@
         }
         mActualHeadTrackingMode = spatializerMode;
         if (mEngine != nullptr) {
-            setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
-                                 std::vector<SpatializerHeadTrackingMode>{spatializerMode});
+            if (spatializerMode == SpatializerHeadTrackingMode::DISABLED) {
+                resetEngineHeadPose_l();
+            } else {
+                setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+                                     std::vector<SpatializerHeadTrackingMode>{spatializerMode});
+            }
         }
         callback = mHeadTrackingCallback;
         mLocalLog.log("%s: %s, spatializerMode %s", __func__, media::toString(mode).c_str(),
@@ -924,16 +939,25 @@
     bool lowLatencySupported = mSupportedLatencyModes.empty()
             || (std::find(mSupportedLatencyModes.begin(), mSupportedLatencyModes.end(),
                     AUDIO_LATENCY_MODE_LOW) != mSupportedLatencyModes.end());
-    if (mSupportsHeadTracking && mPoseController != nullptr) {
-        if (lowLatencySupported && mNumActiveTracks > 0 && mLevel != SpatializationLevel::NONE
-            && mDesiredHeadTrackingMode != HeadTrackingMode::STATIC
-            && mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
-            mPoseController->setHeadSensor(mHeadSensor);
-            mPoseController->setScreenSensor(mScreenSensor);
-            requestedLatencyMode = AUDIO_LATENCY_MODE_LOW;
+    if (mSupportsHeadTracking) {
+        if (mPoseController != nullptr) {
+            if (lowLatencySupported && mNumActiveTracks > 0 && mLevel != SpatializationLevel::NONE
+                && mDesiredHeadTrackingMode != HeadTrackingMode::STATIC
+                && mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
+                if (mEngine != nullptr) {
+                    setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+                            std::vector<SpatializerHeadTrackingMode>{mActualHeadTrackingMode});
+                }
+                mPoseController->setHeadSensor(mHeadSensor);
+                mPoseController->setScreenSensor(mScreenSensor);
+                requestedLatencyMode = AUDIO_LATENCY_MODE_LOW;
+            } else {
+                mPoseController->setHeadSensor(SpatializerPoseController::INVALID_SENSOR);
+                mPoseController->setScreenSensor(SpatializerPoseController::INVALID_SENSOR);
+                resetEngineHeadPose_l();
+            }
         } else {
-            mPoseController->setHeadSensor(SpatializerPoseController::INVALID_SENSOR);
-            mPoseController->setScreenSensor(SpatializerPoseController::INVALID_SENSOR);
+            resetEngineHeadPose_l();
         }
     }
     if (mOutput != AUDIO_IO_HANDLE_NONE) {
@@ -947,8 +971,6 @@
             mEngine->setEnabled(true);
             setEffectParameter_l(SPATIALIZER_PARAM_LEVEL,
                     std::vector<SpatializationLevel>{mLevel});
-            setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
-                    std::vector<SpatializerHeadTrackingMode>{mActualHeadTrackingMode});
         } else {
             setEffectParameter_l(SPATIALIZER_PARAM_LEVEL,
                     std::vector<SpatializationLevel>{SpatializationLevel::NONE});
@@ -970,6 +992,7 @@
         mPoseController->setDisplayOrientation(mDisplayOrientation);
     } else if (!isControllerNeeded && mPoseController != nullptr) {
         mPoseController.reset();
+        resetEngineHeadPose_l();
     }
     if (mPoseController != nullptr) {
         mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
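With the Spatializer changes above, the engine only receives the actual head-tracking mode when low-latency tracking can actually run; every other path (no pose controller, static mode, no head sensor, nothing playing, or the controller being torn down) recenters the pose through resetEngineHeadPose_l(). A condensed sketch of that branch structure, assuming the common 3-translation plus 3-rotation-vector float layout for the head-to-stage pose, so six zeros encodes the identity (recentered) pose:

    #include <vector>

    enum class EngineAction { PushActualMode, ResetHeadPose };

    // Mirrors the conditions used in the reworked sensor-state check; not the real method.
    EngineAction pickEngineAction(bool havePoseController, bool lowLatencySupported,
                                  int numActiveTracks, bool spatializing,
                                  bool dynamicMode, bool haveHeadSensor) {
        if (havePoseController && lowLatencySupported && numActiveTracks > 0
                && spatializing && dynamicMode && haveHeadSensor) {
            return EngineAction::PushActualMode;  // push mActualHeadTrackingMode to the engine
        }
        return EngineAction::ResetHeadPose;       // six 0.0f head-to-stage values + DISABLED mode
    }

    // All-zero head-to-stage vector sent by resetEngineHeadPose_l().
    const std::vector<float> kNeutralHeadToStage(6, 0.0f);

    int main() {
        return pickEngineAction(true, true, 1, true, true, true)
                == EngineAction::PushActualMode ? 0 : 1;
    }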
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 9eb8c48..ba60cae 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -351,6 +351,12 @@
      */
     void checkEngineState_l() REQUIRES(mLock);
 
+    /**
+     * Reset head tracking mode and recenter pose in engine: Called when the head tracking
+     * is disabled.
+     */
+    void resetEngineHeadPose_l() REQUIRES(mLock);
+
     /** Effect engine descriptor */
     const effect_descriptor_t mEngineDescriptor;
     /** Callback interface to parent audio policy service */
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index d51c57c..ba5b6b2 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <cstring>
 #include <memory>
 #include <string>
 #include <sys/wait.h>
@@ -66,6 +67,14 @@
     return criterion;
 }
 
+AudioMixMatchCriterion createSessionIdCriterion(audio_session_t session, bool exclude = false) {
+    AudioMixMatchCriterion criterion;
+    criterion.mValue.mAudioSessionId = session;
+    criterion.mRule = exclude ?
+        RULE_EXCLUDE_AUDIO_SESSION_ID : RULE_MATCH_AUDIO_SESSION_ID;
+    return criterion;
+}
+
 } // namespace
 
 TEST(AudioPolicyManagerTestInit, EngineFailure) {
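For reference, composing the new createSessionIdCriterion helper with the existing ones looks like the parameterized cases added further below (the session value is an arbitrary test constant, and the types come from the test fixture):

    // Mix rule that requires both a specific session id and MEDIA usage.
    const auto session = static_cast<audio_session_t>(42);
    std::vector<AudioMixMatchCriterion> matchRule = {
        createSessionIdCriterion(session),
        createUsageCriterion(AUDIO_USAGE_MEDIA),
    };

    // Variant that excludes the session instead of matching it.
    std::vector<AudioMixMatchCriterion> excludeRule = {
        createSessionIdCriterion(session, /*exclude=*/ true),
        createUsageCriterion(AUDIO_USAGE_MEDIA),
    };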
@@ -151,9 +160,11 @@
             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
             audio_io_handle_t *output = nullptr,
             audio_port_handle_t *portId = nullptr,
-            audio_attributes_t attr = {});
+            audio_attributes_t attr = {},
+            audio_session_t session = AUDIO_SESSION_NONE);
     void getInputForAttr(
             const audio_attributes_t &attr,
+            audio_session_t session,
             audio_unique_id_t riid,
             audio_port_handle_t *selectedDeviceId,
             audio_format_t format,
@@ -233,7 +244,8 @@
         audio_output_flags_t flags,
         audio_io_handle_t *output,
         audio_port_handle_t *portId,
-        audio_attributes_t attr) {
+        audio_attributes_t attr,
+        audio_session_t session) {
     audio_io_handle_t localOutput;
     if (!output) output = &localOutput;
     *output = AUDIO_IO_HANDLE_NONE;
@@ -252,7 +264,7 @@
     attributionSource.uid = 0;
     attributionSource.token = sp<BBinder>::make();
     ASSERT_EQ(OK, mManager->getOutputForAttr(
-                    &attr, output, AUDIO_SESSION_NONE, &stream, attributionSource, &config, &flags,
+                    &attr, output, session, &stream, attributionSource, &config, &flags,
                     selectedDeviceId, portId, {}, &outputType, &isSpatialized));
     ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
     ASSERT_NE(AUDIO_IO_HANDLE_NONE, *output);
@@ -260,6 +272,7 @@
 
 void AudioPolicyManagerTest::getInputForAttr(
         const audio_attributes_t &attr,
+        const audio_session_t session,
         audio_unique_id_t riid,
         audio_port_handle_t *selectedDeviceId,
         audio_format_t format,
@@ -281,7 +294,7 @@
     attributionSource.uid = 0;
     attributionSource.token = sp<BBinder>::make();
     ASSERT_EQ(OK, mManager->getInputForAttr(
-            &attr, &input, riid, AUDIO_SESSION_NONE, attributionSource, &config, flags,
+            &attr, &input, riid, session, attributionSource, &config, flags,
             selectedDeviceId, &inputType, portId));
     ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
 }
@@ -914,8 +927,8 @@
     audio_source_t source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     audio_attributes_t attr = {
         AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source, AUDIO_FLAG_NONE, ""};
-    ASSERT_NO_FATAL_FAILURE(getInputForAttr(attr, 1, &selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT,
-                    AUDIO_CHANNEL_IN_MONO, 8000, AUDIO_INPUT_FLAG_VOIP_TX, &mixPortId));
+    ASSERT_NO_FATAL_FAILURE(getInputForAttr(attr, AUDIO_SESSION_NONE, 1, &selectedDeviceId,
+     AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000, AUDIO_INPUT_FLAG_VOIP_TX, &mixPortId));
 
     std::vector<audio_port_v7> ports;
     ASSERT_NO_FATAL_FAILURE(
@@ -1352,19 +1365,45 @@
     ASSERT_EQ(INVALID_OPERATION, ret);
 }
 
+struct DPTestParam {
+    DPTestParam(const std::vector<AudioMixMatchCriterion>& mixCriteria,
+                bool expected_match = false)
+     : mixCriteria(mixCriteria), attributes(defaultAttr), session(AUDIO_SESSION_NONE),
+       expected_match(expected_match) {}
+
+    DPTestParam& withUsage(audio_usage_t usage) {
+        attributes.usage = usage;
+        return *this;
+    }
+
+    DPTestParam& withTags(const char *tags) {
+        std::strncpy(attributes.tags, tags, sizeof(attributes.tags));
+        return *this;
+    }
+
+    DPTestParam& withSource(audio_source_t source) {
+        attributes.source = source;
+        return *this;
+    }
+
+    DPTestParam& withSessionId(audio_session_t sessionId) {
+        session = sessionId;
+        return *this;
+    }
+
+    std::vector<AudioMixMatchCriterion> mixCriteria;
+    audio_attributes_t attributes;
+    audio_session_t session;
+    bool expected_match;
+};
+
 class AudioPolicyManagerTestDPPlaybackReRouting : public AudioPolicyManagerTestDynamicPolicy,
-        public testing::WithParamInterface<audio_attributes_t> {
+        public testing::WithParamInterface<DPTestParam> {
 protected:
     void SetUp() override;
     void TearDown() override;
 
     std::unique_ptr<RecordingActivityTracker> mTracker;
-
-    std::vector<AudioMixMatchCriterion> mUsageRules = {
-            createUsageCriterion(AUDIO_USAGE_MEDIA),
-            createUsageCriterion(AUDIO_USAGE_ALARM)
-    };
-
     struct audio_port_v7 mInjectionPort;
     audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
 };
@@ -1378,8 +1417,10 @@
     audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
     audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
     audioConfig.sample_rate = k48000SamplingRate;
+
+    DPTestParam param = GetParam();
     status_t ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
-            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig, mUsageRules);
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig, param.mixCriteria);
     ASSERT_EQ(NO_ERROR, ret);
 
     struct audio_port_v7 extractionPort;
@@ -1392,8 +1433,9 @@
         AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source, AUDIO_FLAG_NONE, ""};
     std::string tags = "addr=" + mMixAddress;
     strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
-    getInputForAttr(attr, mTracker->getRiid(), &selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT,
-            AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate, AUDIO_INPUT_FLAG_NONE, &mPortId);
+    getInputForAttr(attr, param.session, mTracker->getRiid(), &selectedDeviceId,
+                    AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate,
+                    AUDIO_INPUT_FLAG_NONE, &mPortId);
     ASSERT_EQ(NO_ERROR, mManager->startInput(mPortId));
     ASSERT_EQ(extractionPort.id, selectedDeviceId);
 
@@ -1406,152 +1448,169 @@
     AudioPolicyManagerTestDynamicPolicy::TearDown();
 }
 
-TEST_F(AudioPolicyManagerTestDPPlaybackReRouting, InitSuccess) {
-    // SetUp must finish with no assertions
-}
-
-TEST_F(AudioPolicyManagerTestDPPlaybackReRouting, Dump) {
-    dumpToLog();
-}
-
 TEST_P(AudioPolicyManagerTestDPPlaybackReRouting, PlaybackReRouting) {
-    const audio_attributes_t attr = GetParam();
-    const audio_usage_t usage = attr.usage;
+    const DPTestParam param = GetParam();
+    const audio_attributes_t& attr = param.attributes;
 
     audio_port_handle_t playbackRoutedPortId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&playbackRoutedPortId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
             k48000SamplingRate, AUDIO_OUTPUT_FLAG_NONE, nullptr /*output*/, nullptr /*portId*/,
-            attr);
-    if (std::find_if(begin(mUsageRules), end(mUsageRules),
-                [&usage](const AudioMixMatchCriterion &c) {
-                              return c.mRule == RULE_MATCH_ATTRIBUTE_USAGE &&
-                                     c.mValue.mUsage == usage;}) != end(mUsageRules) ||
-            (strncmp(attr.tags, "addr=", strlen("addr=")) == 0 &&
-                    strncmp(attr.tags + strlen("addr="), mMixAddress.c_str(),
-                    AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0)) {
+            attr, param.session);
+    if (param.expected_match) {
         EXPECT_EQ(mInjectionPort.id, playbackRoutedPortId);
     } else {
         EXPECT_NE(mInjectionPort.id, playbackRoutedPortId);
     }
 }
 
-INSTANTIATE_TEST_CASE_P(
-        PlaybackReroutingUsageMatch,
-        AudioPolicyManagerTestDPPlaybackReRouting,
-        testing::Values(
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
-                )
-        );
+const std::vector<AudioMixMatchCriterion> USAGE_MEDIA_ALARM_CRITERIA = {
+            createUsageCriterion(AUDIO_USAGE_MEDIA),
+            createUsageCriterion(AUDIO_USAGE_ALARM)
+};
 
-INSTANTIATE_TEST_CASE_P(
-        PlaybackReroutingAddressPriorityMatch,
-        AudioPolicyManagerTestDPPlaybackReRouting,
-        testing::Values(
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                    AUDIO_USAGE_ASSISTANCE_SONIFICATION,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VIRTUAL_SOURCE,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
-                    AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"}
-                )
-        );
+INSTANTIATE_TEST_SUITE_P(
+    PlaybackReroutingUsageMatch,
+    AudioPolicyManagerTestDPPlaybackReRouting,
+    testing::Values(
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_MEDIA),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_MEDIA).withTags("addr=other"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ALARM),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_VOICE_COMMUNICATION),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_NOTIFICATION),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_EVENT),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_GAME),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ false)
+            .withUsage(AUDIO_USAGE_ASSISTANT)));
 
-INSTANTIATE_TEST_CASE_P(
-        PlaybackReroutingUnHandledUsages,
-        AudioPolicyManagerTestDPPlaybackReRouting,
-        testing::Values(
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
-                                     AUDIO_USAGE_ASSISTANCE_SONIFICATION,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
-                                     AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
-                )
-        );
+INSTANTIATE_TEST_SUITE_P(
+    PlaybackReroutingAddressPriorityMatch,
+    AudioPolicyManagerTestDPPlaybackReRouting,
+    testing::Values(
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_MEDIA).withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_VOICE_COMMUNICATION).withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ALARM)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_NOTIFICATION)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_NOTIFICATION_EVENT)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANCE_SONIFICATION)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_GAME)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_VIRTUAL_SOURCE)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANT)
+            .withTags("addr=remote_submix_media"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANT)
+            .withTags("sometag;addr=remote_submix_media;othertag=somevalue"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANT)
+            .withTags("addr=remote_submix_media;othertag"),
+        DPTestParam(USAGE_MEDIA_ALARM_CRITERIA, /*expected_match=*/ true)
+            .withUsage(AUDIO_USAGE_ASSISTANT)
+            .withTags("sometag;othertag;addr=remote_submix_media")));
+
+static constexpr audio_session_t TEST_SESSION_ID = static_cast<audio_session_t>(42);
+static constexpr audio_session_t OTHER_SESSION_ID = static_cast<audio_session_t>(77);
+
+INSTANTIATE_TEST_SUITE_P(
+    PlaybackReRoutingWithSessionId,
+    AudioPolicyManagerTestDPPlaybackReRouting,
+    testing::Values(
+        // Mix is matched because the session id matches the one specified by the mix rule.
+        DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID)},
+                    /*expected_match=*/ true)
+            .withSessionId(TEST_SESSION_ID),
+        // Mix is not matched because the session id doesn't match the one specified
+        // by the mix rule.
+        DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID)},
+                    /*expected_match=*/ false)
+            .withSessionId(OTHER_SESSION_ID),
+        // Mix is matched: the session id doesn't match the one specified by the rule,
+        // but there's an address specified in the tags, which takes precedence.
+        DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID)},
+                    /*expected_match=*/ true)
+            .withSessionId(OTHER_SESSION_ID).withTags("addr=remote_submix_media"),
+        // Mix is matched: both the session id and the usage match the mix rule criteria.
+        DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID),
+                                      createUsageCriterion(AUDIO_USAGE_MEDIA)},
+                    /*expected_match=*/ true)
+            .withSessionId(TEST_SESSION_ID).withUsage(AUDIO_USAGE_MEDIA),
+        // Mix is not matched, the session id matches the one specified by mix rule,
+        // but usage does not.
+        DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID),
+                                      createUsageCriterion(AUDIO_USAGE_MEDIA)},
+                    /*expected_match=*/ false)
+                    .withSessionId(TEST_SESSION_ID).withUsage(AUDIO_USAGE_GAME),
+        // Mix is not matched, the usage matches the one specified by mix rule,
+        // but the session id is excluded.
+        DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID, /*exclude=*/ true),
+                                     createUsageCriterion(AUDIO_USAGE_MEDIA)},
+                    /*expected_match=*/ false)
+                    .withSessionId(TEST_SESSION_ID).withUsage(AUDIO_USAGE_MEDIA)));
 
 class AudioPolicyManagerTestDPMixRecordInjection : public AudioPolicyManagerTestDynamicPolicy,
-        public testing::WithParamInterface<audio_attributes_t> {
+        public testing::WithParamInterface<DPTestParam> {
 protected:
     void SetUp() override;
     void TearDown() override;
 
     std::unique_ptr<RecordingActivityTracker> mTracker;
-
-    std::vector<AudioMixMatchCriterion> mSourceRules = {
-        createCapturePresetCriterion(AUDIO_SOURCE_CAMCORDER),
-        createCapturePresetCriterion(AUDIO_SOURCE_MIC),
-        createCapturePresetCriterion(AUDIO_SOURCE_VOICE_COMMUNICATION)
-    };
-
     struct audio_port_v7 mExtractionPort;
     audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
 };
@@ -1565,8 +1624,10 @@
     audioConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
     audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
     audioConfig.sample_rate = k48000SamplingRate;
+
+    DPTestParam param = GetParam();
     status_t ret = addPolicyMix(MIX_TYPE_RECORDERS, MIX_ROUTE_FLAG_LOOP_BACK,
-            AUDIO_DEVICE_IN_REMOTE_SUBMIX, mMixAddress, audioConfig, mSourceRules);
+            AUDIO_DEVICE_IN_REMOTE_SUBMIX, mMixAddress, audioConfig, param.mixCriteria);
     ASSERT_EQ(NO_ERROR, ret);
 
     struct audio_port_v7 injectionPort;
@@ -1593,73 +1654,94 @@
     AudioPolicyManagerTestDynamicPolicy::TearDown();
 }
 
-TEST_F(AudioPolicyManagerTestDPMixRecordInjection, InitSuccess) {
-    // SetUp mush finish with no assertions.
-}
-
-TEST_F(AudioPolicyManagerTestDPMixRecordInjection, Dump) {
-    dumpToLog();
-}
-
 TEST_P(AudioPolicyManagerTestDPMixRecordInjection, RecordingInjection) {
-    const audio_attributes_t attr = GetParam();
-    const audio_source_t source = attr.source;
+    const DPTestParam param = GetParam();
 
     audio_port_handle_t captureRoutedPortId = AUDIO_PORT_HANDLE_NONE;
     audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
-    getInputForAttr(attr, mTracker->getRiid(), &captureRoutedPortId, AUDIO_FORMAT_PCM_16_BIT,
-            AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate, AUDIO_INPUT_FLAG_NONE, &portId);
-    if (std::find_if(begin(mSourceRules), end(mSourceRules),
-               [&source](const AudioMixMatchCriterion &c) {
-            return c.mRule == RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET &&
-                   c.mValue.mSource == source;})
-            != end(mSourceRules)) {
+    getInputForAttr(param.attributes, param.session, mTracker->getRiid(), &captureRoutedPortId,
+        AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate,
+        AUDIO_INPUT_FLAG_NONE, &portId);
+    if (param.expected_match) {
         EXPECT_EQ(mExtractionPort.id, captureRoutedPortId);
     } else {
         EXPECT_NE(mExtractionPort.id, captureRoutedPortId);
     }
 }
 
+const std::vector<AudioMixMatchCriterion> SOURCE_CAM_MIC_VOICE_CRITERIA = {
+        createCapturePresetCriterion(AUDIO_SOURCE_CAMCORDER),
+        createCapturePresetCriterion(AUDIO_SOURCE_MIC),
+        createCapturePresetCriterion(AUDIO_SOURCE_VOICE_COMMUNICATION)
+};
+
 // No address priority rule for remote recording, address is a "don't care"
 INSTANTIATE_TEST_CASE_P(
-        RecordInjectionSourceMatch,
+        RecordInjectionSource,
         AudioPolicyManagerTestDPMixRecordInjection,
         testing::Values(
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_CAMCORDER, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_CAMCORDER, AUDIO_FLAG_NONE,
-                                     "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_MIC, AUDIO_FLAG_NONE,
-                                     "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_MIC, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE,
-                                     "addr=remote_submix_media"}
-                )
-        );
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ true)
+                .withSource(AUDIO_SOURCE_CAMCORDER),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ true)
+                .withSource(AUDIO_SOURCE_CAMCORDER)
+                .withTags("addr=remote_submix_media"),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ true)
+                .withSource(AUDIO_SOURCE_MIC),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ true)
+                .withSource(AUDIO_SOURCE_MIC)
+                .withTags("addr=remote_submix_media"),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ true)
+                .withSource(AUDIO_SOURCE_VOICE_COMMUNICATION),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ true)
+                .withSource(AUDIO_SOURCE_VOICE_COMMUNICATION)
+                .withTags("addr=remote_submix_media"),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ false)
+                .withSource(AUDIO_SOURCE_VOICE_RECOGNITION),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ false)
+                .withSource(AUDIO_SOURCE_VOICE_RECOGNITION)
+                .withTags("addr=remote_submix_media"),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ false)
+                .withSource(AUDIO_SOURCE_HOTWORD),
+            DPTestParam(SOURCE_CAM_MIC_VOICE_CRITERIA, /*expected_match=*/ false)
+                .withSource(AUDIO_SOURCE_HOTWORD)
+                .withTags("addr=remote_submix_media")));
 
-// No address priority rule for remote recording
 INSTANTIATE_TEST_CASE_P(
-        RecordInjectionSourceNotMatch,
+        RecordInjectionWithSessionId,
         AudioPolicyManagerTestDPMixRecordInjection,
         testing::Values(
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_HOTWORD, AUDIO_FLAG_NONE, ""},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_FLAG_NONE,
-                                     "addr=remote_submix_media"},
-                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
-                                     AUDIO_SOURCE_HOTWORD, AUDIO_FLAG_NONE,
-                                     "addr=remote_submix_media"}
-                )
-        );
+            // Mix is matched because the session id matches the one specified by the mix rule.
+            DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID)},
+                        /*expected_match=*/ true)
+                .withSessionId(TEST_SESSION_ID),
+            // Mix is not matched because the session id doesn't match the one specified
+            // by the mix rule.
+            DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID)},
+                        /*expected_match=*/ false)
+                .withSessionId(OTHER_SESSION_ID),
+            // Mix is not matched: the session id doesn't match the one specified by the rule,
+            // but the address specified in the tags is ignored for recorder mixes.
+            DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID)},
+                        /*expected_match=*/ false)
+                .withSessionId(OTHER_SESSION_ID).withTags("addr=remote_submix_media"),
+            // Mix is matched: both the session id and the source match the mix rule criteria.
+            DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID),
+                                          createCapturePresetCriterion(AUDIO_SOURCE_CAMCORDER)},
+                        /*expected_match=*/ true)
+                .withSessionId(TEST_SESSION_ID).withSource(AUDIO_SOURCE_CAMCORDER),
+            // Mix is not matched, the session id matches the one specified by mix rule,
+            // but source does not.
+            DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID),
+                                          createCapturePresetCriterion(AUDIO_SOURCE_CAMCORDER)},
+                        /*expected_match=*/ false)
+                .withSessionId(TEST_SESSION_ID).withSource(AUDIO_SOURCE_MIC),
+            // Mix is not matched, the source matches the one specified by mix rule,
+            // but the session id is excluded.
+            DPTestParam(/*mixCriteria=*/ {createSessionIdCriterion(TEST_SESSION_ID,
+                                                                       /*exclude=*/ true),
+                                          createCapturePresetCriterion(AUDIO_SOURCE_MIC)},
+                        /*expected_match=*/ false)
+                .withSessionId(TEST_SESSION_ID).withSource(AUDIO_SOURCE_MIC)));
 
 using DeviceConnectionTestParams =
         std::tuple<audio_devices_t /*type*/, std::string /*name*/, std::string /*address*/>;
@@ -1762,8 +1844,9 @@
                 k48000SamplingRate, AUDIO_OUTPUT_FLAG_NONE);
     } else if (audio_is_input_device(type)) {
         RecordingActivityTracker tracker;
-        getInputForAttr({}, tracker.getRiid(), &routedPortId, AUDIO_FORMAT_PCM_16_BIT,
-                AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate, AUDIO_INPUT_FLAG_NONE);
+        getInputForAttr({}, AUDIO_SESSION_NONE, tracker.getRiid(), &routedPortId,
+         AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate,
+         AUDIO_INPUT_FLAG_NONE);
     }
     ASSERT_EQ(devicePort.id, routedPortId);
 
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 981c569..f4ac2a1 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -156,14 +156,14 @@
         "android.hardware.camera.provider@2.5",
         "android.hardware.camera.provider@2.6",
         "android.hardware.camera.provider@2.7",
-        "android.hardware.camera.provider-V1-ndk",
+        "android.hardware.camera.provider-V2-ndk",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.3",
         "android.hardware.camera.device@3.4",
         "android.hardware.camera.device@3.5",
         "android.hardware.camera.device@3.6",
         "android.hardware.camera.device@3.7",
-        "android.hardware.camera.device-V1-ndk",
+        "android.hardware.camera.device-V2-ndk",
         "media_permission-aidl-cpp",
     ],
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index bc8981e..f87e241 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -206,6 +206,7 @@
     status_t res;
 
     std::vector<std::string> deviceIds;
+    std::unordered_map<std::string, std::set<std::string>> unavailPhysicalIds;
     {
         Mutex::Autolock l(mServiceLock);
 
@@ -236,7 +237,7 @@
             ALOGE("Failed to enumerate flash units: %s (%d)", strerror(-res), res);
         }
 
-        deviceIds = mCameraProviderManager->getCameraDeviceIds();
+        deviceIds = mCameraProviderManager->getCameraDeviceIds(&unavailPhysicalIds);
     }
 
 
@@ -245,6 +246,12 @@
         if (getCameraState(id8) == nullptr) {
             onDeviceStatusChanged(id8, CameraDeviceStatus::PRESENT);
         }
+        if (unavailPhysicalIds.count(cameraId) > 0) {
+            for (const auto& physicalId : unavailPhysicalIds[cameraId]) {
+                String8 physicalId8 = String8(physicalId.c_str());
+                onDeviceStatusChanged(id8, physicalId8, CameraDeviceStatus::NOT_PRESENT);
+            }
+        }
     }
 
     // Derive primary rear/front cameras, and filter their characteristics.
@@ -501,7 +508,7 @@
 
     if (state == nullptr) {
         ALOGE("%s: Physical camera id %s status change on a non-present ID %s",
-                __FUNCTION__, id.string(), physicalId.string());
+                __FUNCTION__, physicalId.string(), id.string());
         return;
     }
 
@@ -1767,20 +1774,68 @@
     return ret;
 }
 
+String16 CameraService::getPackageNameFromUid(int clientUid) {
+    String16 packageName("");
+
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16(kPermissionServiceName));
+    if (binder == 0) {
+        ALOGE("Cannot get permission service");
+        // Return an empty package name; further interaction
+        // with the camera will likely fail.
+        return packageName;
+    }
+
+    sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
+    Vector<String16> packages;
+
+    permCtrl->getPackagesForUid(clientUid, packages);
+
+    if (packages.isEmpty()) {
+        ALOGE("No packages for calling UID %d", clientUid);
+        // Return an empty package name; further interaction
+        // with the camera will likely fail.
+        return packageName;
+    }
+
+    // Arbitrarily pick the first name in the list
+    packageName = packages[0];
+
+    return packageName;
+}
+
 template<class CALLBACK, class CLIENT>
 Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
-        int api1CameraId, const String16& clientPackageName, bool systemNativeClient,
+        int api1CameraId, const String16& clientPackageNameMaybe, bool systemNativeClient,
         const std::optional<String16>& clientFeatureId, int clientUid, int clientPid,
         apiLevel effectiveApiLevel, bool shimUpdateOnly, int oomScoreOffset, int targetSdkVersion,
         /*out*/sp<CLIENT>& device) {
     binder::Status ret = binder::Status::ok();
 
+    bool isNonSystemNdk = false;
+    String16 clientPackageName;
+    if (clientPackageNameMaybe.size() <= 0) {
+        // NDK calls don't come with package names, but we need one for various cases.
+        // Generally, there's a 1:1 mapping between UID and package name, but shared UIDs
+        // do exist. For all authentication cases, all packages under the same UID get the
+        // same permissions, so picking any associated package name is sufficient. For some
+        // other cases, this may give inaccurate names for clients in logs.
+        isNonSystemNdk = true;
+        int packageUid = (clientUid == USE_CALLING_UID) ?
+            CameraThreadState::getCallingUid() : clientUid;
+        clientPackageName = getPackageNameFromUid(packageUid);
+    } else {
+        clientPackageName = clientPackageNameMaybe;
+    }
+
     String8 clientName8(clientPackageName);
 
     int originalClientPid = 0;
 
+    int packagePid = (clientPid == USE_CALLING_PID) ?
+        CameraThreadState::getCallingPid() : clientPid;
     ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) and "
-            "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
+            "Camera API version %d", packagePid, clientName8.string(), cameraId.string(),
             static_cast<int>(effectiveApiLevel));
 
     nsecs_t openTimeNs = systemTime();
@@ -1788,7 +1843,7 @@
     sp<CLIENT> client = nullptr;
     int facing = -1;
     int orientation = 0;
-    bool isNonSystemNdk = (clientPackageName.size() == 0);
+
     {
         // Acquire mServiceLock and prevent other clients from connecting
         std::unique_ptr<AutoConditionLock> lock =
@@ -2046,6 +2101,10 @@
                 onlineClientDesc->getOwnerId(), onlinePriority.getState(),
                 // native clients don't have offline processing support.
                 /*ommScoreOffset*/ 0, /*systemNativeClient*/false);
+        if (offlineClientDesc == nullptr) {
+            ALOGE("%s: Offline client descriptor was NULL", __FUNCTION__);
+            return BAD_VALUE;
+        }
 
         // Allow only one offline device per camera
         auto incompatibleClients = mActiveClientManager.getIncompatibleClients(offlineClientDesc);
@@ -3279,37 +3338,6 @@
         sCameraService = cameraService;
     }
 
-    // In some cases the calling code has no access to the package it runs under.
-    // For example, NDK camera API.
-    // In this case we will get the packages for the calling UID and pick the first one
-    // for attributing the app op. This will work correctly for runtime permissions
-    // as for legacy apps we will toggle the app op for all packages in the UID.
-    // The caveat is that the operation may be attributed to the wrong package and
-    // stats based on app ops may be slightly off.
-    if (mClientPackageName.size() <= 0) {
-        sp<IServiceManager> sm = defaultServiceManager();
-        sp<IBinder> binder = sm->getService(String16(kPermissionServiceName));
-        if (binder == 0) {
-            ALOGE("Cannot get permission service");
-            // Leave mClientPackageName unchanged (empty) and the further interaction
-            // with camera will fail in BasicClient::startCameraOps
-            return;
-        }
-
-        sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
-        Vector<String16> packages;
-
-        permCtrl->getPackagesForUid(mClientUid, packages);
-
-        if (packages.isEmpty()) {
-            ALOGE("No packages for calling UID");
-            // Leave mClientPackageName unchanged (empty) and the further interaction
-            // with camera will fail in BasicClient::startCameraOps
-            return;
-        }
-        mClientPackageName = packages[0];
-    }
-
     // There are 2 scenarios in which a client won't have AppOps operations
     // (both scenarios : native clients)
     //    1) It's a system native client*, the package name will be empty
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 0395475..6e2300a 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -834,10 +834,19 @@
     // sorted in alpha-numeric order.
     void filterAPI1SystemCameraLocked(const std::vector<std::string> &normalDeviceIds);
 
+    // In some cases the calling code has no access to the package it runs under.
+    // For example, NDK camera API.
+    // In this case we will get the packages for the calling UID and pick the first one
+    // for attributing the app op. This will work correctly for runtime permissions
+    // as for legacy apps we will toggle the app op for all packages in the UID.
+    // The caveat is that the operation may be attributed to the wrong package and
+    // stats based on app ops may be slightly off.
+    String16 getPackageNameFromUid(int clientUid);
+
     // Single implementation shared between the various connect calls
     template<class CALLBACK, class CLIENT>
     binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
-            int api1CameraId, const String16& clientPackageName, bool systemNativeClient,
+            int api1CameraId, const String16& clientPackageNameMaybe, bool systemNativeClient,
             const std::optional<String16>& clientFeatureId, int clientUid, int clientPid,
             apiLevel effectiveApiLevel, bool shimUpdateOnly, int scoreOffset, int targetSdkVersion,
             /*out*/sp<CLIENT>& device);
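
The body of the new getPackageNameFromUid() helper is not part of the hunks shown here; it replaces the package lookup that the removed BasicClient block in CameraService.cpp performed inline. A minimal sketch of what it could look like, assuming it simply relocates that IPermissionController lookup (the empty-string fallback and the log wording are illustrative, not taken from this patch):

    String16 CameraService::getPackageNameFromUid(int clientUid) {
        String16 packageName("");

        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder = sm->getService(String16(kPermissionServiceName));
        if (binder == 0) {
            ALOGE("Cannot get permission service");
            // Return an empty package name; the later interaction with the
            // camera will fail in BasicClient::startCameraOps.
            return packageName;
        }

        sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
        Vector<String16> packages;
        permCtrl->getPackagesForUid(clientUid, packages);

        if (packages.isEmpty()) {
            ALOGE("No packages for calling UID %d", clientUid);
            return packageName;
        }

        // Pick the first package for attributing the app op; for legacy apps
        // the app op is toggled for all packages in the UID anyway.
        return packages[0];
    }
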
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 123cd75..8b2af90 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -953,6 +953,12 @@
                 false);
 
     if (availableVideoStabilizationModes.count > 1) {
+        for (size_t i = 0; i < availableVideoStabilizationModes.count; i++) {
+            if (availableVideoStabilizationModes.data.u8[i] ==
+                ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON) {
+                videoStabilizationOnSupported = true;
+            }
+        }
         params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
                 CameraParameters::TRUE);
     } else {
@@ -2373,9 +2379,11 @@
             reqCropRegion, 4);
     if (res != OK) return res;
 
-    uint8_t reqVstabMode = videoStabilization ?
+    uint8_t reqVstabMode = videoStabilization ? videoStabilizationOnSupported ?
             ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON :
+                    ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION :
             ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+
     res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
             &reqVstabMode, 1);
     if (res != OK) return res;
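
The nested ternary above folds two decisions into one expression: whether the app requested video stabilization at all, and whether the HAL actually advertises VIDEO_STABILIZATION_MODE_ON (the new videoStabilizationOnSupported flag from Parameters.h below). An equivalent, more explicit spelling of the same selection, for reference only:

    uint8_t reqVstabMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
    if (videoStabilization) {
        // Fall back to preview stabilization when the HAL does not list
        // VIDEO_STABILIZATION_MODE_ON among its available modes.
        reqVstabMode = videoStabilizationOnSupported ?
                ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON :
                ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION;
    }
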
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index cbe62a7..fd18a5d 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -147,6 +147,7 @@
 
     bool recordingHint;
     bool videoStabilization;
+    bool videoStabilizationOnSupported = false;
 
     CameraParameters2 params;
     String8 paramsFlattened;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index ba26ac4..c49ecb2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -884,6 +884,7 @@
     int64_t streamUseCase = outputConfiguration.getStreamUseCase();
     int timestampBase = outputConfiguration.getTimestampBase();
     int mirrorMode = outputConfiguration.getMirrorMode();
+    int32_t colorSpace = outputConfiguration.getColorSpace();
 
     res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
             outputConfiguration.getSurfaceType());
@@ -928,7 +929,7 @@
         res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo,
                 isStreamInfoValid, surface, bufferProducer, mCameraIdStr,
                 mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed, dynamicRangeProfile,
-                streamUseCase, timestampBase, mirrorMode);
+                streamUseCase, timestampBase, mirrorMode, colorSpace);
 
         if (!res.isOk())
             return res;
@@ -975,7 +976,7 @@
                 &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
                 outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution,
                 /*consumerUsage*/0, streamInfo.dynamicRangeProfile, streamInfo.streamUseCase,
-                streamInfo.timestampBase, streamInfo.mirrorMode);
+                streamInfo.timestampBase, streamInfo.mirrorMode, streamInfo.colorSpace);
     }
 
     if (err != OK) {
@@ -1027,6 +1028,7 @@
     int width, height, format, surfaceType;
     uint64_t consumerUsage;
     android_dataspace dataSpace;
+    int32_t colorSpace;
     status_t err;
     binder::Status res;
 
@@ -1040,6 +1042,7 @@
     surfaceType = outputConfiguration.getSurfaceType();
     format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
     dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
+    colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED;
     // Hardcode consumer usage flags: SurfaceView--0x900, SurfaceTexture--0x100.
     consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
     if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
@@ -1089,7 +1092,8 @@
                         outputConfiguration.getDynamicRangeProfile(),
                         outputConfiguration.getStreamUseCase(),
                         outputConfiguration.getTimestampBase(),
-                        outputConfiguration.getMirrorMode()));
+                        outputConfiguration.getMirrorMode(),
+                        colorSpace));
 
         ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
                 " (%d x %d) stream with format 0x%x.",
@@ -1280,6 +1284,7 @@
     int64_t streamUseCase = outputConfiguration.getStreamUseCase();
     int timestampBase = outputConfiguration.getTimestampBase();
     int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
+    int32_t colorSpace = outputConfiguration.getColorSpace();
     int mirrorMode = outputConfiguration.getMirrorMode();
 
     for (size_t i = 0; i < newOutputsMap.size(); i++) {
@@ -1288,7 +1293,7 @@
         res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo,
                 /*isStreamInfoValid*/ false, surface, newOutputsMap.valueAt(i), mCameraIdStr,
                 mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed, dynamicRangeProfile,
-                streamUseCase, timestampBase, mirrorMode);
+                streamUseCase, timestampBase, mirrorMode, colorSpace);
         if (!res.isOk())
             return res;
 
@@ -1646,7 +1651,8 @@
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
     int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
-    int64_t streamUseCase= outputConfiguration.getStreamUseCase();
+    int32_t colorSpace = outputConfiguration.getColorSpace();
+    int64_t streamUseCase = outputConfiguration.getStreamUseCase();
     int timestampBase = outputConfiguration.getTimestampBase();
     int mirrorMode = outputConfiguration.getMirrorMode();
     for (auto& bufferProducer : bufferProducers) {
@@ -1662,7 +1668,7 @@
         res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
                 true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
                 mDevice->infoPhysical(physicalId), sensorPixelModesUsed, dynamicRangeProfile,
-                streamUseCase, timestampBase, mirrorMode);
+                streamUseCase, timestampBase, mirrorMode, colorSpace);
 
         if (!res.isOk())
             return res;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 54cc27a..71965f2 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -931,7 +931,7 @@
                 tempOutputFile.str().c_str(), errno);
         return NO_INIT;
     }
-    inputFrame.muxer = new MediaMuxer(inputFrame.fileFd, MediaMuxer::OUTPUT_FORMAT_HEIF);
+    inputFrame.muxer = MediaMuxer::create(inputFrame.fileFd, MediaMuxer::OUTPUT_FORMAT_HEIF);
     if (inputFrame.muxer == nullptr) {
         ALOGE("%s: Failed to create MediaMuxer for file fd %d",
                 __FUNCTION__, inputFrame.fileFd);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index f926b88..95b4050 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -40,8 +40,6 @@
 
 namespace android {
 
-const static size_t kDisconnectTimeoutMs = 2500;
-
 using namespace camera2;
 
 // Interface used by CameraService
@@ -266,10 +264,16 @@
 
 template <typename TClientBase>
 binder::Status Camera2ClientBase<TClientBase>::disconnect() {
-    if (mCameraServiceWatchdog != nullptr) {
+    if (mCameraServiceWatchdog != nullptr && mDevice != nullptr) {
+        // The watchdog budget for the disconnect call should exceed getExpectedInFlightDuration,
+        // since that duration bounds the error handling done during the disconnect sequence;
+        // this lets the existing error-handling paths run to completion first.
+        uint64_t maxExpectedDuration =
+                ns2ms(mDevice->getExpectedInFlightDuration() + kBufferTimeDisconnectNs);
+
         // Initialization from hal succeeded, time disconnect.
         return mCameraServiceWatchdog->WATCH_CUSTOM_TIMER(disconnectImpl(),
-                kDisconnectTimeoutMs / kCycleLengthMs, kCycleLengthMs);
+                maxExpectedDuration / kCycleLengthMs, kCycleLengthMs);
     }
     return disconnectImpl();
 }
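
The watchdog budget computed above now scales with how much capture work is still in flight, instead of the fixed 2500 ms that the removed kDisconnectTimeoutMs provided. With illustrative numbers (kCycleLengthMs is defined by CameraServiceWatchdog and is not shown in this patch):

    nsecs_t inFlight = mDevice->getExpectedInFlightDuration();       // say, 500'000'000 ns
    uint64_t budgetMs = ns2ms(inFlight + kBufferTimeDisconnectNs);   // 500 ms + 3000 ms = 3500 ms
    size_t cycles = budgetMs / kCycleLengthMs;                       // polling cycles handed to
                                                                     // WATCH_CUSTOM_TIMER
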
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 6e37589..37a7200 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -134,6 +134,9 @@
 
 protected:
 
+    // Buffer time added when sizing the disconnect watchdog timeout
+    static const nsecs_t kBufferTimeDisconnectNs = 3000000000; // 3 sec.
+
     // The PID provided in the constructor call
     pid_t mInitialClientPid;
     bool mOverrideForPerfClass = false;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 69514f3..89a2af8 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -192,7 +192,9 @@
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) = 0;
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+            = 0;
 
     /**
      * Create an output stream of the requested size, format, rotation and
@@ -213,7 +215,9 @@
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) = 0;
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+            = 0;
 
     /**
      * Create an input stream of width, height, and format.
@@ -235,11 +239,13 @@
         bool dataSpaceOverridden;
         android_dataspace originalDataSpace;
         int64_t dynamicRangeProfile;
+        int32_t colorSpace;
 
         StreamInfo() : width(0), height(0), format(0), formatOverridden(false), originalFormat(0),
                 dataSpace(HAL_DATASPACE_UNKNOWN), dataSpaceOverridden(false),
                 originalDataSpace(HAL_DATASPACE_UNKNOWN),
-                dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD){}
+                dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
+                colorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED) {}
         /**
          * Check whether the format matches the current or the original one in case
          * it got overridden.
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index cd23250..851a6d0 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -197,12 +197,17 @@
     return std::make_pair(systemCameraCount, publicCameraCount);
 }
 
-std::vector<std::string> CameraProviderManager::getCameraDeviceIds() const {
+std::vector<std::string> CameraProviderManager::getCameraDeviceIds(std::unordered_map<
+            std::string, std::set<std::string>>* unavailablePhysicalIds) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     std::vector<std::string> deviceIds;
     for (auto& provider : mProviders) {
         for (auto& id : provider->mUniqueCameraIds) {
             deviceIds.push_back(id);
+            if (unavailablePhysicalIds != nullptr &&
+                    provider->mUnavailablePhysicalCameras.count(id) > 0) {
+                (*unavailablePhysicalIds)[id] = provider->mUnavailablePhysicalCameras.at(id);
+            }
         }
     }
     return deviceIds;
@@ -843,9 +848,6 @@
 
 void CameraProviderManager::ProviderInfo::initializeProviderInfoCommon(
         const std::vector<std::string> &devices) {
-
-    sp<StatusListener> listener = mManager->getStatusListener();
-
     for (auto& device : devices) {
         std::string id;
         status_t res = addDevice(device, CameraDeviceStatus::PRESENT, &id);
@@ -860,38 +862,22 @@
             mProviderName.c_str(), mDevices.size());
 
     // Process cached status callbacks
-    std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus =
-            std::make_unique<std::vector<CameraStatusInfoT>>();
     {
         std::lock_guard<std::mutex> lock(mInitLock);
 
         for (auto& statusInfo : mCachedStatus) {
             std::string id, physicalId;
-            status_t res = OK;
             if (statusInfo.isPhysicalCameraStatus) {
-                res = physicalCameraDeviceStatusChangeLocked(&id, &physicalId,
+                physicalCameraDeviceStatusChangeLocked(&id, &physicalId,
                     statusInfo.cameraId, statusInfo.physicalCameraId, statusInfo.status);
             } else {
-                res = cameraDeviceStatusChangeLocked(&id, statusInfo.cameraId, statusInfo.status);
-            }
-            if (res == OK) {
-                cachedStatus->emplace_back(statusInfo.isPhysicalCameraStatus,
-                        id.c_str(), physicalId.c_str(), statusInfo.status);
+                cameraDeviceStatusChangeLocked(&id, statusInfo.cameraId, statusInfo.status);
             }
         }
         mCachedStatus.clear();
 
         mInitialized = true;
     }
-
-    // The cached status change callbacks cannot be fired directly from this
-    // function, due to same-thread deadlock trying to acquire mInterfaceMutex
-    // twice.
-    if (listener != nullptr) {
-        mInitialStatusCallbackFuture = std::async(std::launch::async,
-                &CameraProviderManager::ProviderInfo::notifyInitialStatusChange, this,
-                listener, std::move(cachedStatus));
-    }
 }
 
 CameraProviderManager::ProviderInfo::DeviceInfo* CameraProviderManager::findDeviceInfoLocked(
@@ -1961,6 +1947,7 @@
     for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
         if ((*it)->mId == id) {
             mUniqueCameraIds.erase(id);
+            mUnavailablePhysicalCameras.erase(id);
             if ((*it)->isAPI1Compatible()) {
                 mUniqueAPI1CompatibleCameraIds.erase(std::remove(
                     mUniqueAPI1CompatibleCameraIds.begin(),
@@ -2228,6 +2215,15 @@
         return BAD_VALUE;
     }
 
+    if (mUnavailablePhysicalCameras.count(cameraId) == 0) {
+        mUnavailablePhysicalCameras.emplace(cameraId, std::set<std::string>{});
+    }
+    if (newStatus != CameraDeviceStatus::PRESENT) {
+        mUnavailablePhysicalCameras[cameraId].insert(physicalCameraDeviceName);
+    } else {
+        mUnavailablePhysicalCameras[cameraId].erase(physicalCameraDeviceName);
+    }
+
     *id = cameraId;
     *physicalId = physicalCameraDeviceName.c_str();
     return OK;
@@ -2286,20 +2282,6 @@
     }
 }
 
-void CameraProviderManager::ProviderInfo::notifyInitialStatusChange(
-        sp<StatusListener> listener,
-        std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus) {
-    for (auto& statusInfo : *cachedStatus) {
-        if (statusInfo.isPhysicalCameraStatus) {
-            listener->onDeviceStatusChanged(String8(statusInfo.cameraId.c_str()),
-                    String8(statusInfo.physicalCameraId.c_str()), statusInfo.status);
-        } else {
-            listener->onDeviceStatusChanged(
-                    String8(statusInfo.cameraId.c_str()), statusInfo.status);
-        }
-    }
-}
-
 CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
         const metadata_vendor_id_t tagId, const std::string &id,
         uint16_t minorVersion,
@@ -2649,9 +2631,6 @@
 }
 
 CameraProviderManager::ProviderInfo::~ProviderInfo() {
-    if (mInitialStatusCallbackFuture.valid()) {
-        mInitialStatusCallbackFuture.wait();
-    }
     // Destruction of ProviderInfo is only supposed to happen when the respective
     // CameraProvider interface dies, so do not unregister callbacks.
 }
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index d049aff..86047e8 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -23,7 +23,6 @@
 #include <set>
 #include <string>
 #include <mutex>
-#include <future>
 
 #include <camera/camera2/ConcurrentCamera.h>
 #include <camera/CameraParameters2.h>
@@ -220,7 +219,14 @@
      */
     std::pair<int, int> getCameraCount() const;
 
-    std::vector<std::string> getCameraDeviceIds() const;
+    /**
+     * On return, if unavailablePhysicalIds is not nullptr, it contains all of
+     * the currently unavailable physical camera Ids, represented in the form
+     * of:
+     * {[logicalCamera, {physicalCamera1, physicalCamera2, ...}], ...}.
+     */
+    std::vector<std::string> getCameraDeviceIds(std::unordered_map<
+            std::string, std::set<std::string>>* unavailablePhysicalIds = nullptr) const;
 
     /**
      * Retrieve the number of API1 compatible cameras; these are internal and
@@ -607,6 +613,7 @@
         };
         std::vector<std::unique_ptr<DeviceInfo>> mDevices;
         std::unordered_set<std::string> mUniqueCameraIds;
+        std::unordered_map<std::string, std::set<std::string>> mUnavailablePhysicalCameras;
         int mUniqueDeviceCount;
         std::vector<std::string> mUniqueAPI1CompatibleCameraIds;
         // The initial public camera IDs published by the camera provider.
@@ -715,8 +722,6 @@
         std::vector<CameraStatusInfoT> mCachedStatus;
         // End of scope for mInitLock
 
-        std::future<void> mInitialStatusCallbackFuture;
-
         std::unique_ptr<ProviderInfo::DeviceInfo>
         virtual initializeDeviceInfo(
                 const std::string &name, const metadata_vendor_id_t tagId,
@@ -724,9 +729,6 @@
 
         virtual status_t reCacheConcurrentStreamingCameraIdsLocked() = 0;
 
-        void notifyInitialStatusChange(sp<StatusListener> listener,
-                std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus);
-
         std::vector<std::unordered_set<std::string>> mConcurrentCameraIdCombinations;
 
         // Parse provider instance name for type and id
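
With the asynchronous notifyInitialStatusChange() path removed, a physical camera that is already NOT_PRESENT when the provider is initialized is no longer reported through a status callback; callers pull it out of the optional map documented above instead. A hypothetical consumer (only getCameraDeviceIds() itself is from this patch; the surrounding code is illustrative):

    std::unordered_map<std::string, std::set<std::string>> unavailablePhysicalIds;
    std::vector<std::string> ids = providerManager->getCameraDeviceIds(&unavailablePhysicalIds);
    for (const auto& id : ids) {
        auto it = unavailablePhysicalIds.find(id);
        if (it == unavailablePhysicalIds.end()) continue;
        for (const auto& physicalId : it->second) {
            // e.g. seed a newly attached status listener with the cached
            // NOT_PRESENT state instead of relying on an init-time callback.
            ALOGV("Logical camera %s: physical camera %s is currently unavailable",
                    id.c_str(), physicalId.c_str());
        }
    }
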
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index ef68f28..6d35391 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -550,6 +550,11 @@
                     "ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL tags: %s (%d)", __FUNCTION__,
                     strerror(-res), res);
         }
+
+        // b/247038031: If system_server crashes, cameraserver is restarted as
+        // well. If the flashlight was turned on before the crash, it may remain
+        // stuck on. As a workaround, explicitly set the torch mode to OFF.
+        interface->setTorchMode(false);
     } else {
         mHasFlashUnit = false;
     }
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 3672666..17a4a44 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -992,7 +992,7 @@
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
             uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
-            int timestampBase, int mirrorMode) {
+            int timestampBase, int mirrorMode, int32_t colorSpace) {
     ATRACE_CALL();
 
     if (consumer == nullptr) {
@@ -1006,7 +1006,7 @@
     return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
             format, dataSpace, rotation, id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
             streamSetId, isShared, isMultiResolution, consumerUsage, dynamicRangeProfile,
-            streamUseCase, timestampBase, mirrorMode);
+            streamUseCase, timestampBase, mirrorMode, colorSpace);
 }
 
 static bool isRawFormat(int format) {
@@ -1027,7 +1027,7 @@
         const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
         std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
         uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
-        int timestampBase, int mirrorMode) {
+        int timestampBase, int mirrorMode, int32_t colorSpace) {
     ATRACE_CALL();
 
     Mutex::Autolock il(mInterfaceLock);
@@ -1036,10 +1036,10 @@
     ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
             " consumer usage %" PRIu64 ", isShared %d, physicalCameraId %s, isMultiResolution %d"
             " dynamicRangeProfile 0x%" PRIx64 ", streamUseCase %" PRId64 ", timestampBase %d,"
-            " mirrorMode %d",
+            " mirrorMode %d, colorSpace %d",
             mId.string(), mNextStreamId, width, height, format, dataSpace, rotation,
             consumerUsage, isShared, physicalCameraId.string(), isMultiResolution,
-            dynamicRangeProfile, streamUseCase, timestampBase, mirrorMode);
+            dynamicRangeProfile, streamUseCase, timestampBase, mirrorMode, colorSpace);
 
     status_t res;
     bool wasActive = false;
@@ -1110,7 +1110,7 @@
                 width, height, blobBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
                 isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
-                timestampBase, mirrorMode);
+                timestampBase, mirrorMode, colorSpace);
     } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         bool maxResolution =
                 sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
@@ -1125,25 +1125,25 @@
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
                 isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
-                timestampBase, mirrorMode);
+                timestampBase, mirrorMode, colorSpace);
     } else if (isShared) {
         newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
                 mUseHalBufManager, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
-                timestampBase, mirrorMode);
+                timestampBase, mirrorMode, colorSpace);
     } else if (consumers.size() == 0 && hasDeferredConsumer) {
         newStream = new Camera3OutputStream(mNextStreamId,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
                 isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
-                timestampBase, mirrorMode);
+                timestampBase, mirrorMode, colorSpace);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
                 isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
-                timestampBase, mirrorMode);
+                timestampBase, mirrorMode, colorSpace);
     }
 
     size_t consumerCount = consumers.size();
@@ -1231,6 +1231,7 @@
     streamInfo->dataSpaceOverridden = stream->isDataSpaceOverridden();
     streamInfo->originalDataSpace = stream->getOriginalDataSpace();
     streamInfo->dynamicRangeProfile = stream->getDynamicRangeProfile();
+    streamInfo->colorSpace = stream->getColorSpace();
     return OK;
 }
 
@@ -1744,7 +1745,7 @@
     }
 
     // Calculate expected duration for flush with additional buffer time in ms for watchdog
-    uint64_t maxExpectedDuration = (getExpectedInFlightDuration() + kBaseGetBufferWait) / 1e6;
+    uint64_t maxExpectedDuration = ns2ms(getExpectedInFlightDuration() + kBaseGetBufferWait);
     status_t res = mCameraServiceWatchdog->WATCH_CUSTOM_TIMER(mRequestThread->flush(),
             maxExpectedDuration / kCycleLengthMs, kCycleLengthMs);
 
@@ -1880,7 +1881,8 @@
                     stream->getFormat(), streamMaxPreviewFps, stream->getDataSpace(), usage,
                     stream->getMaxHalBuffers(),
                     stream->getMaxTotalBuffers() - stream->getMaxHalBuffers(),
-                    stream->getDynamicRangeProfile(), streamUseCase);
+                    stream->getDynamicRangeProfile(), streamUseCase,
+                    stream->getColorSpace());
             }
         }
     }
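
The flush timeout above switches from dividing by 1e6 to the ns2ms() helper from utils/Timers.h. Both convert nanoseconds to milliseconds; the helper keeps the arithmetic in integer types instead of routing it through a double. Side by side, as a sketch:

    nsecs_t totalNs = getExpectedInFlightDuration() + kBaseGetBufferWait;
    uint64_t oldMs = totalNs / 1e6;     // double division, truncated on assignment
    uint64_t newMs = ns2ms(totalNs);    // integer ns -> ms conversion, same value here
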
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 80b1722..f5e167e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -151,7 +151,9 @@
             ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) override;
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+            override;
 
     status_t createStream(const std::vector<sp<Surface>>& consumers,
             bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
@@ -166,7 +168,9 @@
             ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) override;
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+            override;
 
     status_t createInputStream(
             uint32_t width, uint32_t height, int format, bool isMultiResolution,
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index f594f84..a78d01e 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -35,11 +35,12 @@
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile, int64_t streamUseCase,
-        bool deviceTimeBaseIsRealtime, int timestampBase) :
+        bool deviceTimeBaseIsRealtime, int timestampBase, int32_t colorSpace) :
         Camera3Stream(id, type,
                 width, height, maxSize, format, dataSpace, rotation,
                 physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
-                dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime, timestampBase),
+                dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime, timestampBase,
+                colorSpace),
         mTotalBufferCount(0),
         mMaxCachedBufferCount(0),
         mHandoutTotalBufferCount(0),
@@ -93,6 +94,7 @@
     }
     lines.appendFormat("      Dynamic Range Profile: 0x%" PRIx64 "\n",
             camera_stream::dynamic_range_profile);
+    lines.appendFormat("      Color Space: %d\n", camera_stream::color_space);
     lines.appendFormat("      Stream use case: %" PRId64 "\n", camera_stream::use_case);
     lines.appendFormat("      Timestamp base: %d\n", getTimestampBase());
     lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index ca1f238..6af0875 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -41,7 +41,8 @@
             int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
-            int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
+            int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
 
   public:
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index ec8d484..1abcd86 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -56,12 +56,12 @@
         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
-        int mirrorMode) :
+        int mirrorMode, int32_t colorSpace) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
                             physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                             dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
-                            timestampBase),
+                            timestampBase, colorSpace),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -91,11 +91,11 @@
         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
-        int mirrorMode) :
+        int mirrorMode, int32_t colorSpace) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
                             setId, isMultiResolution, dynamicRangeProfile, streamUseCase,
-                            deviceTimeBaseIsRealtime, timestampBase),
+                            deviceTimeBaseIsRealtime, timestampBase, colorSpace),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -131,12 +131,12 @@
         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
-        int mirrorMode) :
+        int mirrorMode, int32_t colorSpace) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
                             physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                             dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
-                            timestampBase),
+                            timestampBase, colorSpace),
         mConsumer(nullptr),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -180,13 +180,13 @@
                                          int setId, bool isMultiResolution,
                                          int64_t dynamicRangeProfile, int64_t streamUseCase,
                                          bool deviceTimeBaseIsRealtime, int timestampBase,
-                                         int mirrorMode) :
+                                         int mirrorMode, int32_t colorSpace) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
                             format, dataSpace, rotation,
                             physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                             dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
-                            timestampBase),
+                            timestampBase, colorSpace),
         mTransform(0),
         mTraceFirstBuffer(true),
         mUseBufferManager(false),
@@ -1407,18 +1407,24 @@
 }
 
 nsecs_t Camera3OutputStream::syncTimestampToDisplayLocked(nsecs_t t) {
+    nsecs_t currentTime = systemTime();
+    if (!mFixedFps) {
+        mLastCaptureTime = t;
+        mLastPresentTime = currentTime;
+        return t;
+    }
+
     ParcelableVsyncEventData parcelableVsyncEventData;
     auto res = mDisplayEventReceiver.getLatestVsyncEventData(&parcelableVsyncEventData);
     if (res != OK) {
         ALOGE("%s: Stream %d: Error getting latest vsync event data: %s (%d)",
                 __FUNCTION__, mId, strerror(-res), res);
         mLastCaptureTime = t;
-        mLastPresentTime = t;
+        mLastPresentTime = currentTime;
         return t;
     }
 
     const VsyncEventData& vsyncEventData = parcelableVsyncEventData.vsync;
-    nsecs_t currentTime = systemTime();
     nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;
 
     // Find the best presentation time without worrying about previous frame's
@@ -1523,8 +1529,8 @@
         }
     }
 
-    if (expectedPresentT == mLastPresentTime && expectedPresentT <=
-            vsyncEventData.frameTimelines[maxTimelines].expectedPresentationTime) {
+    if (expectedPresentT == mLastPresentTime && expectedPresentT <
+            vsyncEventData.frameTimelines[maxTimelines-1].expectedPresentationTime) {
         // Couldn't find a reasonable presentation time. Using last frame's
         // presentation time would cause a frame drop. The best option now
         // is to use the next VSync as long as the last presentation time
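
The boundary change in the last hunk above tightens which timeline can still be used: assuming maxTimelines is the count of frame timelines examined by the selection loop earlier in this function (the loop is not part of this hunk), valid indices run from 0 to maxTimelines - 1, so the old check peeked one entry past the examined range. Sketch of the before/after condition:

    // before: expectedPresentT <= frameTimelines[maxTimelines].expectedPresentationTime
    // after:  expectedPresentT <  frameTimelines[maxTimelines - 1].expectedPresentationTime
    //
    // Only when even the last examined timeline would repeat mLastPresentTime does the
    // code fall through to scheduling the frame on the next VSync.
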
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 741bca2..0d758bc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -96,7 +96,8 @@
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
     /**
      * Set up a stream for formats that have a variable buffer size for the same
      * dimensions, such as compressed JPEG.
@@ -113,7 +114,8 @@
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
     /**
      * Set up a stream with deferred consumer for formats that have 2 dimensions, such as
      * RAW and YUV. The consumer must be set before using this stream for output. A valid
@@ -129,7 +131,8 @@
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
 
     virtual ~Camera3OutputStream();
 
@@ -273,7 +276,8 @@
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
 
     /**
      * Note that we release the lock briefly in this function
@@ -432,7 +436,7 @@
     static constexpr nsecs_t kSpacingResetIntervalNs = 50000000LL; // 50 millisecond
     static constexpr nsecs_t kTimelineThresholdNs = 1000000LL; // 1 millisecond
     static constexpr float kMaxIntervalRatioDeviation = 0.05f;
-    static constexpr int kMaxTimelines = 3;
+    static constexpr int kMaxTimelines = 2;
     nsecs_t syncTimestampToDisplayLocked(nsecs_t t);
 
     // Re-space frames by delaying queueBuffer so that frame delivery has
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 9215f23..da45227 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -35,12 +35,12 @@
         const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
         int setId, bool useHalBufManager, int64_t dynamicProfile,
         int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
-        int mirrorMode) :
+        int mirrorMode, int32_t colorSpace) :
         Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
                             transport, consumerUsage, timestampOffset, setId,
                             /*isMultiResolution*/false, dynamicProfile, streamUseCase,
-                            deviceTimeBaseIsRealtime, timestampBase, mirrorMode),
+                            deviceTimeBaseIsRealtime, timestampBase, mirrorMode, colorSpace),
         mUseHalBufManager(useHalBufManager) {
     size_t consumerCount = std::min(surfaces.size(), kMaxOutputs);
     if (surfaces.size() > consumerCount) {
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index aac3c2a..5167225 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -45,7 +45,8 @@
             int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
             bool deviceTimeBaseIsRealtime = false,
             int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
-            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
+            int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
+            int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
 
     virtual ~Camera3SharedOutputStream();
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 88be9ff..4d8495f 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -55,7 +55,8 @@
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
         int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase) :
+        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+        int32_t colorSpace) :
     camera_stream(),
     mId(id),
     mSetId(setId),
@@ -95,6 +96,7 @@
     camera_stream::sensor_pixel_modes_used = sensorPixelModesUsed;
     camera_stream::dynamic_range_profile = dynamicRangeProfile;
     camera_stream::use_case = streamUseCase;
+    camera_stream::color_space = colorSpace;
 
     if ((format == HAL_PIXEL_FORMAT_BLOB || format == HAL_PIXEL_FORMAT_RAW_OPAQUE) &&
             maxSize == 0) {
@@ -135,6 +137,10 @@
     return camera_stream::data_space;
 }
 
+int32_t Camera3Stream::getColorSpace() const {
+    return camera_stream::color_space;
+}
+
 uint64_t Camera3Stream::getUsage() const {
     return mUsage;
 }
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 214618a..f32053b 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -167,6 +167,7 @@
     uint32_t          getHeight() const;
     int               getFormat() const;
     android_dataspace getDataSpace() const;
+    int32_t           getColorSpace() const;
     uint64_t          getUsage() const;
     void              setUsage(uint64_t usage);
     void              setFormatOverride(bool formatOverriden);
@@ -509,7 +510,8 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
-            int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase);
+            int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+            int32_t colorSpace);
 
     wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
 
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 6812e89..823be2e 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -67,6 +67,7 @@
     std::unordered_set<int32_t> sensor_pixel_modes_used;
     int64_t dynamic_range_profile;
     int64_t use_case;
+    int32_t color_space;
 } camera_stream_t;
 
 typedef struct camera_stream_buffer {
@@ -114,20 +115,24 @@
         int64_t streamUseCase;
         int timestampBase;
         int mirrorMode;
+        int32_t colorSpace;
         OutputStreamInfo() :
             width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
             consumerUsage(0),
             dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
             streamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT),
             timestampBase(OutputConfiguration::TIMESTAMP_BASE_DEFAULT),
-            mirrorMode(OutputConfiguration::MIRROR_MODE_AUTO) {}
+            mirrorMode(OutputConfiguration::MIRROR_MODE_AUTO),
+            colorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED) {}
         OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
                 uint64_t _consumerUsage, const std::unordered_set<int32_t>& _sensorPixelModesUsed,
-                int64_t _dynamicRangeProfile, int _streamUseCase, int _timestampBase, int _mirrorMode) :
+                int64_t _dynamicRangeProfile, int _streamUseCase, int _timestampBase, int _mirrorMode,
+                int32_t _colorSpace) :
             width(_width), height(_height), format(_format),
             dataSpace(_dataSpace), consumerUsage(_consumerUsage),
             sensorPixelModesUsed(_sensorPixelModesUsed), dynamicRangeProfile(_dynamicRangeProfile),
-            streamUseCase(_streamUseCase), timestampBase(_timestampBase), mirrorMode(_mirrorMode) {}
+            streamUseCase(_streamUseCase), timestampBase(_timestampBase), mirrorMode(_mirrorMode),
+            colorSpace(_colorSpace) {}
 };
 
 // Utility class to lock and unlock a GraphicBuffer
@@ -206,6 +211,7 @@
     virtual int      getFormat() const = 0;
     virtual int64_t  getDynamicRangeProfile() const = 0;
     virtual android_dataspace getDataSpace() const = 0;
+    virtual int32_t getColorSpace() const = 0;
     virtual void setFormatOverride(bool formatOverriden) = 0;
     virtual bool isFormatOverridden() const = 0;
     virtual int getOriginalFormat() const = 0;
diff --git a/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp b/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp
index 67f42b4..83caa00 100644
--- a/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp
+++ b/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp
@@ -68,8 +68,10 @@
         return true;
     }
 
-    // Cache the frame to match readout time interval, for up to 33ms
-    nsecs_t expectedQueueTime = mLastCameraPresentTime + readoutInterval;
+    // Cache the frame to match the readout time interval, for up to kMaxFrameWaitTime.
+    // Because the code between here and queueBuffer() takes time to execute, keep the
+    // effective presentation interval slightly shorter than readoutInterval.
+    nsecs_t expectedQueueTime = mLastCameraPresentTime + readoutInterval - kFrameAdjustThreshold;
     nsecs_t frameWaitTime = std::min(kMaxFrameWaitTime, expectedQueueTime - currentTime);
     if (frameWaitTime > 0 && mPendingBuffers.size() < 2) {
         mBufferCond.waitRelative(mLock, frameWaitTime);
@@ -78,9 +80,9 @@
         }
         currentTime = systemTime();
     }
-    ALOGV("%s: readoutInterval %" PRId64 ", queueInterval %" PRId64 ", waited for %" PRId64
+    ALOGV("%s: readoutInterval %" PRId64 ", waited for %" PRId64
             ", timestamp %" PRId64, __FUNCTION__, readoutInterval,
-            currentTime - mLastCameraPresentTime, frameWaitTime, buffer.timestamp);
+            mPendingBuffers.size() < 2 ? frameWaitTime : 0, buffer.timestamp);
     mPendingBuffers.pop();
     queueBufferToClientLocked(buffer, currentTime);
     return true;
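
Taken together with the constants changed in PreviewFrameSpacer.h below, the wait above now ends slightly early rather than a full readout interval later. With the new values spelled out (repeated here for illustration):

    // kMaxFrameWaitTime     = 10 ms   (was 33 ms)
    // kFrameAdjustThreshold =  2 ms
    nsecs_t expectedQueueTime = mLastCameraPresentTime + readoutInterval - kFrameAdjustThreshold;
    nsecs_t frameWaitTime = std::min(kMaxFrameWaitTime, expectedQueueTime - currentTime);
    // i.e. wait at most 10 ms, and aim to queue roughly 2 ms before a full readout
    // interval has elapsed, leaving headroom for the work done before
    // queueBufferToClientLocked(buffer, currentTime).
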
diff --git a/services/camera/libcameraservice/device3/PreviewFrameSpacer.h b/services/camera/libcameraservice/device3/PreviewFrameSpacer.h
index e165768..f46de3d 100644
--- a/services/camera/libcameraservice/device3/PreviewFrameSpacer.h
+++ b/services/camera/libcameraservice/device3/PreviewFrameSpacer.h
@@ -85,7 +85,8 @@
     nsecs_t mLastCameraPresentTime = 0;
     static constexpr nsecs_t kWaitDuration = 5000000LL; // 5ms
     static constexpr nsecs_t kFrameIntervalThreshold = 80000000LL; // 80ms
-    static constexpr nsecs_t kMaxFrameWaitTime = 33333333LL; // 33ms
+    static constexpr nsecs_t kMaxFrameWaitTime = 10000000LL; // 10ms
+    static constexpr nsecs_t kFrameAdjustThreshold = 2000000LL; // 2ms
 };
 
 }; //namespace camera3
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 4f4e581..99c067e 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -913,6 +913,7 @@
                     cam3stream->getOriginalFormat() : src->format);
         dst.dataSpace = mapToAidlDataspace(cam3stream->isDataSpaceOverridden() ?
                     cam3stream->getOriginalDataSpace() : src->data_space);
+        dst.colorSpace = src->color_space;
 
         dst.bufferSize = bufferSizes[i];
         if (src->physical_camera_id != nullptr) {
diff --git a/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h b/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
index 74b3700..a071989 100644
--- a/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
+++ b/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
@@ -73,6 +73,10 @@
           ANDROID_REQUEST_RECOMMENDED_TEN_BIT_DYNAMIC_RANGE_PROFILE,
           ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES,
         } },
+      {34, {
+          ANDROID_CONTROL_AVAILABLE_SETTINGS_OVERRIDES,
+          ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP,
+        } },
 };
 
 /**
@@ -89,4 +93,7 @@
           ANDROID_SENSOR_PIXEL_MODE,
           ANDROID_SENSOR_RAW_BINNING_FACTOR_USED,
         }  },
+      {34, {
+          ANDROID_CONTROL_SETTINGS_OVERRIDE,
+        }  },
 };
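
The two tables above gain API level 34 entries (the settings-override and color-space tags). How they are consumed is outside this hunk; the general shape is a filter that strips any tag introduced after the vendor partition's VNDK version. A hypothetical sketch, with the helper name and container type assumed rather than taken from this file:

    static void filterTagsForVndkVersion(int vndkVersion,
            const std::map<int, std::vector<camera_metadata_tag>>& tagsPerApiLevel,
            std::vector<camera_metadata_tag>* tagsToRemove) {
        for (const auto& [introducedIn, tags] : tagsPerApiLevel) {
            if (introducedIn > vndkVersion) {
                // A vendor partition frozen at an older VNDK version cannot know
                // about these tags, so they are hidden from it.
                tagsToRemove->insert(tagsToRemove->end(), tags.begin(), tags.end());
            }
        }
    }
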
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
index d909624..b1bf41e 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -52,7 +52,7 @@
         "android.hardware.camera.provider@2.5",
         "android.hardware.camera.provider@2.6",
         "android.hardware.camera.provider@2.7",
-        "android.hardware.camera.provider-V1-ndk",
+        "android.hardware.camera.provider-V2-ndk",
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.3",
diff --git a/services/camera/libcameraservice/tests/Android.bp b/services/camera/libcameraservice/tests/Android.bp
index 4d7798c..5e2a3fb 100644
--- a/services/camera/libcameraservice/tests/Android.bp
+++ b/services/camera/libcameraservice/tests/Android.bp
@@ -49,7 +49,7 @@
         "android.hardware.camera.provider@2.5",
         "android.hardware.camera.provider@2.6",
         "android.hardware.camera.provider@2.7",
-        "android.hardware.camera.provider-V1-ndk",
+        "android.hardware.camera.provider-V2-ndk",
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.4",
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index e9f6979..2f55def 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -102,23 +102,57 @@
     sp<device::V3_2::ICameraDevice> mDeviceInterface;
     hardware::hidl_vec<common::V1_0::VendorTagSection> mVendorTagSections;
 
+    // Whether to call a physical camera unavailable callback upon setCallback
+    bool mHasPhysicalCameraUnavailableCallback;
+    hardware::hidl_string mLogicalCameraId;
+    hardware::hidl_string mUnavailablePhysicalCameraId;
+
     TestICameraProvider(const std::vector<hardware::hidl_string> &devices,
             const hardware::hidl_vec<common::V1_0::VendorTagSection> &vendorSection) :
         mDeviceNames(devices),
         mDeviceInterface(new TestDeviceInterface(devices)),
-        mVendorTagSections (vendorSection) {}
+        mVendorTagSections (vendorSection),
+        mHasPhysicalCameraUnavailableCallback(false) {}
 
     TestICameraProvider(const std::vector<hardware::hidl_string> &devices,
             const hardware::hidl_vec<common::V1_0::VendorTagSection> &vendorSection,
             android::hardware::hidl_vec<uint8_t> chars) :
         mDeviceNames(devices),
         mDeviceInterface(new TestDeviceInterface(devices, chars)),
-        mVendorTagSections (vendorSection) {}
+        mVendorTagSections (vendorSection),
+        mHasPhysicalCameraUnavailableCallback(false) {}
+
+    TestICameraProvider(const std::vector<hardware::hidl_string> &devices,
+            const hardware::hidl_vec<common::V1_0::VendorTagSection> &vendorSection,
+            android::hardware::hidl_vec<uint8_t> chars,
+            const hardware::hidl_string& logicalCameraId,
+            const hardware::hidl_string& unavailablePhysicalCameraId) :
+        mDeviceNames(devices),
+        mDeviceInterface(new TestDeviceInterface(devices, chars)),
+        mVendorTagSections (vendorSection),
+        mHasPhysicalCameraUnavailableCallback(true),
+        mLogicalCameraId(logicalCameraId),
+        mUnavailablePhysicalCameraId(unavailablePhysicalCameraId) {}
 
     virtual hardware::Return<Status> setCallback(
             const sp<provider::V2_4::ICameraProviderCallback>& callbacks) override {
         mCalledCounter[SET_CALLBACK]++;
         mCallbacks = callbacks;
+        if (mHasPhysicalCameraUnavailableCallback) {
+            auto cast26 = provider::V2_6::ICameraProviderCallback::castFrom(callbacks);
+            if (!cast26.isOk()) {
+                ADD_FAILURE() << "Failed to cast ICameraProviderCallback to V2_6";
+            } else {
+                sp<provider::V2_6::ICameraProviderCallback> callback26 = cast26;
+                if (callback26 == nullptr) {
+                    ADD_FAILURE() << "V2_6::ICameraProviderCallback is null after conversion";
+                } else {
+                    callback26->physicalCameraDeviceStatusChange(mLogicalCameraId,
+                            mUnavailablePhysicalCameraId,
+                            android::hardware::camera::common::V1_0::CameraDeviceStatus::NOT_PRESENT);
+                }
+            }
+        }
         return hardware::Return<Status>(Status::OK);
     }
 
@@ -266,12 +300,16 @@
 };
 
 struct TestStatusListener : public CameraProviderManager::StatusListener {
+    int mPhysicalCameraStatusChangeCount = 0;
+
     ~TestStatusListener() {}
 
     void onDeviceStatusChanged(const String8 &,
             CameraDeviceStatus) override {}
     void onDeviceStatusChanged(const String8 &, const String8 &,
-            CameraDeviceStatus) override {}
+            CameraDeviceStatus) override {
+        mPhysicalCameraStatusChangeCount++;
+    }
     void onTorchStatusChanged(const String8 &,
             TorchModeStatus) override {}
     void onTorchStatusChanged(const String8 &,
@@ -634,3 +672,46 @@
     ASSERT_EQ(deviceCount, deviceNames.size()) <<
             "Unexpected amount of camera devices";
 }
+
+// Test that CameraProviderManager does not trigger
+// onDeviceStatusChanged(NOT_PRESENT) for physical camera before initialize()
+// returns.
+TEST(CameraProviderManagerTest, PhysicalCameraAvailabilityCallbackRaceTest) {
+    std::vector<hardware::hidl_string> deviceNames;
+    deviceNames.push_back("device@3.2/test/0");
+    hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+
+    sp<CameraProviderManager> providerManager = new CameraProviderManager();
+    sp<TestStatusListener> statusListener = new TestStatusListener();
+    TestInteractionProxy serviceProxy;
+
+    android::hardware::hidl_vec<uint8_t> chars;
+    CameraMetadata meta;
+    int32_t charKeys[] = { ANDROID_REQUEST_AVAILABLE_CAPABILITIES };
+    meta.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, charKeys,
+            sizeof(charKeys) / sizeof(charKeys[0]));
+    uint8_t capabilities[] = { ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA };
+    meta.update(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, capabilities,
+            sizeof(capabilities)/sizeof(capabilities[0]));
+    uint8_t physicalCameraIds[] = { '2', '\0', '3', '\0' };
+    meta.update(ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS, physicalCameraIds,
+            sizeof(physicalCameraIds)/sizeof(physicalCameraIds[0]));
+    camera_metadata_t* metaBuffer = const_cast<camera_metadata_t*>(meta.getAndLock());
+    chars.setToExternal(reinterpret_cast<uint8_t*>(metaBuffer),
+            get_camera_metadata_size(metaBuffer));
+
+    sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+            vendorSection, chars, "device@3.2/test/0", "2");
+    serviceProxy.setProvider(provider);
+
+    status_t res = providerManager->initialize(statusListener, &serviceProxy);
+    ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+    ASSERT_EQ(statusListener->mPhysicalCameraStatusChangeCount, 0)
+            << "Unexpected physical camera status change callback upon provider init.";
+
+    std::unordered_map<std::string, std::set<std::string>> unavailablePhysicalIds;
+    auto cameraIds = providerManager->getCameraDeviceIds(&unavailablePhysicalIds);
+    ASSERT_TRUE(unavailablePhysicalIds.count("0") > 0 && unavailablePhysicalIds["0"].count("2") > 0)
+        << "Unavailable physical camera Ids not set properly.";
+}
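
Note on the test fixture above: ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS stores the physical camera ids as consecutive NUL-terminated strings in a single u8 metadata entry, which is why the test packs them as { '2', '\0', '3', '\0' }. A minimal standalone sketch of decoding such a payload follows; parsePhysicalCameraIds is a hypothetical helper for illustration only and is not part of this patch.

    // Sketch: split a run of NUL-terminated id strings into individual ids.
    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    std::vector<std::string> parsePhysicalCameraIds(const uint8_t* data, size_t count) {
        std::vector<std::string> ids;
        std::string current;
        for (size_t i = 0; i < count; ++i) {
            if (data[i] == '\0') {
                ids.push_back(current);   // id terminated; start collecting the next one
                current.clear();
            } else {
                current.push_back(static_cast<char>(data[i]));
            }
        }
        return ids;  // yields {"2", "3"} for the buffer built in the test above
    }
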
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index ed490a8..f9afd41 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -282,6 +282,61 @@
     }
 }
 
+bool deviceReportsColorSpaces(const CameraMetadata& staticInfo) {
+    camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    for (size_t i = 0; i < entry.count; ++i) {
+        uint8_t capability = entry.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_COLOR_SPACE_PROFILES) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool isColorSpaceSupported(int32_t colorSpace, int32_t format, android_dataspace dataSpace,
+        int64_t dynamicRangeProfile, const CameraMetadata& staticInfo) {
+    int64_t colorSpace64 = colorSpace;
+    int64_t format64 = format;
+
+    // Translate HAL format + data space to public format
+    if (format == HAL_PIXEL_FORMAT_BLOB && dataSpace == HAL_DATASPACE_V0_JFIF) {
+        format64 = 0x100; // JPEG
+    } else if (format == HAL_PIXEL_FORMAT_BLOB
+            && dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) {
+        format64 = 0x48454946; // HEIC
+    } else if (format == HAL_PIXEL_FORMAT_BLOB
+            && dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_DYNAMIC_DEPTH)) {
+        format64 = 0x69656963; // DEPTH_JPEG
+    } else if (format == HAL_PIXEL_FORMAT_BLOB && dataSpace == HAL_DATASPACE_DEPTH) {
+        return false; // DEPTH_POINT_CLOUD, not applicable
+    } else if (format == HAL_PIXEL_FORMAT_Y16 && dataSpace == HAL_DATASPACE_DEPTH) {
+        return false; // DEPTH16, not applicable
+    } else if (format == HAL_PIXEL_FORMAT_RAW16 && dataSpace == HAL_DATASPACE_DEPTH) {
+        return false; // RAW_DEPTH, not applicable
+    } else if (format == HAL_PIXEL_FORMAT_RAW10 && dataSpace == HAL_DATASPACE_DEPTH) {
+        return false; // RAW_DEPTH10, not applicable
+    }
+
+    camera_metadata_ro_entry_t entry =
+            staticInfo.find(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP);
+    for (size_t i = 0; i < entry.count; i += 3) {
+        bool isFormatCompatible = (format64 == entry.data.i64[i + 1]);
+        bool isDynamicProfileCompatible =
+                (dynamicRangeProfile & entry.data.i64[i + 2]) != 0;
+
+        if (colorSpace64 == entry.data.i64[i]
+                && isFormatCompatible
+                && isDynamicProfileCompatible) {
+            return true;
+        }
+    }
+
+    ALOGE("Color space %d, image format %" PRId64 ", and dynamic range 0x%" PRIx64
+            " combination not found", colorSpace, format64, dynamicRangeProfile);
+    return false;
+}
+
 bool isPublicFormat(int32_t format)
 {
     switch(format) {
@@ -336,7 +391,8 @@
         sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
         const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
         const std::vector<int32_t> &sensorPixelModesUsed, int64_t dynamicRangeProfile,
-        int64_t streamUseCase, int timestampBase, int mirrorMode) {
+        int64_t streamUseCase, int timestampBase, int mirrorMode,
+        int32_t colorSpace) {
     // bufferProducer must be non-null
     if (gbp == nullptr) {
         String8 msg = String8::format("Camera %s: Surface is NULL", logicalCameraId.string());
@@ -450,6 +506,16 @@
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
+    if (colorSpace != ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED &&
+            SessionConfigurationUtils::deviceReportsColorSpaces(physicalCameraMetadata) &&
+            !SessionConfigurationUtils::isColorSpaceSupported(colorSpace, format, dataSpace,
+                    dynamicRangeProfile, physicalCameraMetadata)) {
+        String8 msg = String8::format("Camera %s: Color space %d not supported, failed to "
+                "create output stream (pixel format %d dynamic range profile %" PRId64 ")",
+                logicalCameraId.string(), colorSpace, format, dynamicRangeProfile);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
     if (!SessionConfigurationUtils::isStreamUseCaseSupported(streamUseCase,
             physicalCameraMetadata)) {
         String8 msg = String8::format("Camera %s: stream use case %" PRId64 " not supported,"
@@ -483,6 +549,7 @@
         streamInfo.streamUseCase = streamUseCase;
         streamInfo.timestampBase = timestampBase;
         streamInfo.mirrorMode = mirrorMode;
+        streamInfo.colorSpace = colorSpace;
         return binder::Status::ok();
     }
     if (width != streamInfo.width) {
@@ -538,6 +605,7 @@
     camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
     stream->usage = AidlCamera3Device::mapToAidlConsumerUsage(u);
     stream->dataSpace = AidlCamera3Device::mapToAidlDataspace(streamInfo.dataSpace);
+    stream->colorSpace = streamInfo.colorSpace;
     stream->rotation = AidlCamera3Device::mapToAidlStreamRotation(rotation);
     stream->id = -1; // Invalid stream id
     stream->physicalCameraId = std::string(physicalId.string());
@@ -635,6 +703,7 @@
         String8 physicalCameraId = String8(it.getPhysicalCameraId());
 
         int64_t dynamicRangeProfile = it.getDynamicRangeProfile();
+        int32_t colorSpace = it.getColorSpace();
         std::vector<int32_t> sensorPixelModesUsed = it.getSensorPixelModesUsed();
         const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId,
                 overrideForPerfClass);
@@ -693,7 +762,7 @@
             sp<Surface> surface;
             res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
                     logicalCameraId, metadataChosen, sensorPixelModesUsed, dynamicRangeProfile,
-                    streamUseCase, timestampBase, mirrorMode);
+                    streamUseCase, timestampBase, mirrorMode, colorSpace);
 
             if (!res.isOk())
                 return res;
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index a127c7b..264045e 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -98,7 +98,8 @@
         sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
         const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
         const std::vector<int32_t> &sensorPixelModesUsed,  int64_t dynamicRangeProfile,
-        int64_t streamUseCase, int timestampBase, int mirrorMode);
+        int64_t streamUseCase, int timestampBase, int mirrorMode,
+        int32_t colorSpace);
 
 //check if format is 10-bit output compatible
 bool is10bitCompatibleFormat(int32_t format);
@@ -109,6 +110,11 @@
 // Check if the device supports a given dynamicRangeProfile
 bool isDynamicRangeProfileSupported(int64_t dynamicRangeProfile, const CameraMetadata& staticMeta);
 
+bool deviceReportsColorSpaces(const CameraMetadata& staticMeta);
+
+bool isColorSpaceSupported(int32_t colorSpace, int32_t format, android_dataspace dataSpace,
+        int64_t dynamicRangeProfile, const CameraMetadata& staticMeta);
+
 bool isStreamUseCaseSupported(int64_t streamUseCase, const CameraMetadata &deviceInfo);
 
 void mapStreamInfo(const OutputStreamInfo &streamInfo,
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
index 1efdc60..250ac63 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
@@ -55,6 +55,10 @@
             return ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION;
         case ANDROID_LENS_DISTORTION:
             return ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION;
+        case ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE:
+            return ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION;
+        case ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE:
+            return ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION;
         default:
             ALOGE("%s: Tag %d doesn't have a maximum resolution counterpart", __FUNCTION__,
                     defaultTag);
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 4488efb..3222950 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -54,6 +54,9 @@
         arm64: {
             src: "seccomp_policy/mediaswcodec-arm64.policy",
         },
+        riscv64: {
+            src: "seccomp_policy/mediaswcodec-riscv64.policy",
+        },
         x86: {
             src: "seccomp_policy/mediaswcodec-x86.policy",
         },
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-riscv64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-riscv64.policy
new file mode 100644
index 0000000..a55c3eb
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-riscv64.policy
@@ -0,0 +1,60 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+read: 1
+mprotect: 1
+prctl: 1
+openat: 1
+getuid: 1
+getrlimit: 1
+writev: 1
+ioctl: 1
+close: 1
+mmap: 1
+munmap: 1
+fstat: 1
+madvise: 1
+newfstatat: 1
+futex: 1
+faccessat: 1
+lseek: 1
+clone: 1
+sigaltstack: 1
+rt_sigprocmask: 1
+setpriority: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+readlinkat: 1
+fstatfs: 1
+pread64: 1
+mremap: 1
+dup: 1
+set_tid_address: 1
+write: 1
+nanosleep: 1
+sched_setscheduler: 1
+uname: 1
+memfd_create: 1
+ftruncate: 1
+getdents64: 1
+ppoll: 1
+
+# Required by AddressSanitizer
+gettid: 1
+sched_yield: 1
+getpid: 1
+
+@include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.riscv64.policy
diff --git a/services/mediaextractor/Android.bp b/services/mediaextractor/Android.bp
index 85ce110..acafe56 100644
--- a/services/mediaextractor/Android.bp
+++ b/services/mediaextractor/Android.bp
@@ -74,6 +74,9 @@
         arm64: {
             src: "seccomp_policy/mediaextractor-arm64.policy",
         },
+        riscv64: {
+            src: "seccomp_policy/mediaextractor-riscv64.policy",
+        },
         x86: {
             src: "seccomp_policy/mediaextractor-x86.policy",
         },
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-riscv64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-riscv64.policy
new file mode 100644
index 0000000..df143dd
--- /dev/null
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-riscv64.policy
@@ -0,0 +1,48 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+mmap: 1
+munmap: 1
+openat: 1
+mprotect: 1
+madvise: 1
+getuid: 1
+fstat: 1
+fstatfs: 1
+read: 1
+setpriority: 1
+sigaltstack: 1
+clone: 1
+sched_setscheduler: 1
+lseek: 1
+newfstatat: 1
+faccessat: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+rt_sigprocmask: 1
+getrlimit: 1
+nanosleep: 1
+getrandom: 1
+timer_create: 1
+timer_settime: 1
+timer_delete: 1
+
+# for dynamically loading extractors
+getdents64: 1
+readlinkat: 1
+pread64: 1
+mremap: 1
+
+# Required by Sanitizers
+sched_yield: 1
+
+@include /apex/com.android.media/etc/seccomp_policy/crash_dump.riscv64.policy
+@include /apex/com.android.media/etc/seccomp_policy/code_coverage.riscv64.policy
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 11534bb..e8d3f6e 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -169,7 +169,7 @@
         "libmemunreachable",
         "libprotobuf-cpp-lite",
         "libstagefright_foundation",
-        "libstatslog",
+        "libstats_media_metrics",
         "libstatspull",
         "libstatssocket",
         "libutils",
@@ -177,6 +177,7 @@
     ],
 
     export_shared_lib_headers: [
+        "libstats_media_metrics",
         "libstatspull",
         "libstatssocket",
     ],
@@ -200,3 +201,33 @@
         "libaudioutils_headers",
     ],
 }
+
+cc_library {
+    name: "libstats_media_metrics",
+    generated_sources: ["stats_media_metrics.cpp"],
+    generated_headers: ["stats_media_metrics.h"],
+    export_generated_headers: ["stats_media_metrics.h"],
+    shared_libs: [
+        "libcutils",
+        "libstatspull",
+        "libstatssocket",
+    ],
+}
+
+genrule {
+    name: "stats_media_metrics.h",
+    tools: ["stats-log-api-gen"],
+    cmd: "$(location stats-log-api-gen) --header $(genDir)/stats_media_metrics.h --module media_metrics --namespace android,stats,media_metrics",
+    out: [
+        "stats_media_metrics.h",
+    ],
+}
+
+genrule {
+    name: "stats_media_metrics.cpp",
+    tools: ["stats-log-api-gen"],
+    cmd: "$(location stats-log-api-gen) --cpp $(genDir)/stats_media_metrics.cpp --module media_metrics --namespace android,stats,media_metrics --importHeader stats_media_metrics.h",
+    out: [
+        "stats_media_metrics.cpp",
+    ],
+}
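
The two genrules above run stats-log-api-gen to emit a per-module copy of the statsd logging API (stats_media_metrics.h/.cpp) in the stats::media_metrics namespace; the rest of this change then switches the mediametrics call sites from android::util to that namespace. A hedged usage sketch follows, assuming the generated stats_write overload matches the call sites patched below (e.g. statsd_audiopolicy.cpp); the function name, local values, and argument placeholders are illustrative only.

    #include <cstdint>
    #include <string>
    #include <stats_media_metrics.h>   // produced by the stats_media_metrics.h genrule

    int reportAudioPolicyAtom(const std::string& serialized) {
        // Serialized proto payload is wrapped in a BytesField, as in the patched call sites.
        const stats::media_metrics::BytesField payload(serialized.c_str(), serialized.size());
        return stats::media_metrics::stats_write(
                stats::media_metrics::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
                /* timestamp_nanos */ int64_t{0},
                /* package_name */ "android.media",
                /* package_version_code */ int64_t{0},
                /* media_apex_version */ int64_t{0},
                payload);
    }
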
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index b03e418..119bb6c 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -24,7 +24,7 @@
 #include <aaudio/AAudio.h>        // error codes
 #include <audio_utils/clock.h>    // clock conversions
 #include <cutils/properties.h>
-#include <statslog.h>             // statsd
+#include <stats_media_metrics.h>             // statsd
 #include <system/audio.h>
 
 #include "AudioTypes.h"           // string to int conversions
@@ -292,7 +292,7 @@
     int result = 0;
 
 #ifdef STATSD_ENABLE
-    result = android::util::stats_write(args...);
+    result = stats::media_metrics::stats_write(args...);
 #endif
     return result;
 }
@@ -308,7 +308,7 @@
     std::stringstream ss;
 
 #ifdef STATSD_ENABLE
-    result = android::util::stats_write(args...);
+    result = stats::media_metrics::stats_write(args...);
     ss << "result:" << result;
 #endif
     ss << " { ";
@@ -607,7 +607,7 @@
         const int atom_status = types::lookup<types::STATUS, int32_t>(statusString);
 
         // currently we only send create status events.
-        const int32_t event = android::util::
+        const int32_t event = stats::media_metrics::
                 MEDIAMETRICS_AUDIO_RECORD_STATUS_REPORTED__EVENT__AUDIO_RECORD_EVENT_CREATE;
 
         // The following fields should all be present in a create event.
@@ -647,7 +647,7 @@
                 __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD, AMEDIAMETRICS_PROP_SAMPLERATE);
 
         const auto [ result, str ] = sendToStatsd(AudioRecordStatusFields,
-                CONDITION(android::util::MEDIAMETRICS_AUDIORECORDSTATUS_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIORECORDSTATUS_REPORTED)
                 , atom_status
                 , message.c_str()
                 , subCode
@@ -661,7 +661,7 @@
                 , sampleRate
                 );
         ALOGV("%s: statsd %s", __func__, str.c_str());
-        mStatsdLog->log(android::util::MEDIAMETRICS_AUDIORECORDSTATUS_REPORTED, str);
+        mStatsdLog->log(stats::media_metrics::MEDIAMETRICS_AUDIORECORDSTATUS_REPORTED, str);
         return true;
     }
     return false;
@@ -679,7 +679,7 @@
         const int atom_status = types::lookup<types::STATUS, int32_t>(statusString);
 
         // currently we only send create status events.
-        const int32_t event = android::util::
+        const int32_t event = stats::media_metrics::
                 MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__EVENT__AUDIO_TRACK_EVENT_CREATE;
 
         // The following fields should all be present in a create event.
@@ -734,7 +734,7 @@
                 __func__,
                 AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_PITCH);
         const auto [ result, str ] = sendToStatsd(AudioTrackStatusFields,
-                CONDITION(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED)
                 , atom_status
                 , message.c_str()
                 , subCode
@@ -751,7 +751,7 @@
                 , (float)pitch
                 );
         ALOGV("%s: statsd %s", __func__, str.c_str());
-        mStatsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED, str);
+        mStatsdLog->log(stats::media_metrics::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED, str);
         return true;
     }
     return false;
@@ -860,7 +860,7 @@
         if (clientCalled  // only log if client app called AudioRecord.
                 && mAudioAnalytics.mDeliverStatistics) {
             const auto [ result, str ] = sendToStatsd(AudioRecordDeviceUsageFields,
-                    CONDITION(android::util::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED)
+                    CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED)
                     , ENUM_EXTRACT(inputDeviceStatsd)
                     , inputDeviceNames.c_str()
                     , deviceTimeNs
@@ -878,7 +878,7 @@
                     );
             ALOGV("%s: statsd %s", __func__, str.c_str());
             mAudioAnalytics.mStatsdLog->log(
-                    android::util::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED, str);
+                    stats::media_metrics::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED, str);
         }
     } break;
     case THREAD: {
@@ -930,7 +930,7 @@
               << ")";
         if (mAudioAnalytics.mDeliverStatistics) {
             const auto [ result, str ] = sendToStatsd(AudioThreadDeviceUsageFields,
-                CONDITION(android::util::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED)
                 , ENUM_EXTRACT(deviceStatsd)
                 , deviceNames.c_str()
                 , deviceTimeNs
@@ -944,7 +944,7 @@
             );
             ALOGV("%s: statsd %s", __func__, str.c_str());
             mAudioAnalytics.mStatsdLog->log(
-                    android::util::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED, str);
+                    stats::media_metrics::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED, str);
         }
     } break;
     case TRACK: {
@@ -1050,7 +1050,7 @@
         if (clientCalled // only log if client app called AudioTracks
                 && mAudioAnalytics.mDeliverStatistics) {
             const auto [ result, str ] = sendToStatsd(AudioTrackDeviceUsageFields,
-                    CONDITION(android::util::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED)
+                    CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED)
                     , ENUM_EXTRACT(outputDeviceStatsd)
                     , outputDeviceNames.c_str()
                     , deviceTimeNs
@@ -1074,7 +1074,7 @@
                     );
             ALOGV("%s: statsd %s", __func__, str.c_str());
             mAudioAnalytics.mStatsdLog->log(
-                    android::util::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED, str);
+                    stats::media_metrics::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED, str);
         }
         } break;
     }
@@ -1136,7 +1136,7 @@
             const long_enum_type_t inputDeviceBits{};
 
             const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
-                    CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
+                    CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
                     , ENUM_EXTRACT(inputDeviceBits)
                     , ENUM_EXTRACT(outputDeviceBits)
                     , mA2dpDeviceName.c_str()
@@ -1146,7 +1146,7 @@
                     );
             ALOGV("%s: statsd %s", __func__, str.c_str());
             mAudioAnalytics.mStatsdLog->log(
-                    android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
+                    stats::media_metrics::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
         }
     }
 }
@@ -1190,7 +1190,7 @@
                 << " deviceName:" << mA2dpDeviceName;
         if (mAudioAnalytics.mDeliverStatistics) {
             const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
-                    CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
+                    CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
                     , ENUM_EXTRACT(inputDeviceBits)
                     , ENUM_EXTRACT(outputDeviceBits)
                     , mA2dpDeviceName.c_str()
@@ -1200,7 +1200,7 @@
                     );
             ALOGV("%s: statsd %s", __func__, str.c_str());
             mAudioAnalytics.mStatsdLog->log(
-                    android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
+                    stats::media_metrics::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
         }
         return;
     }
@@ -1217,7 +1217,7 @@
             << " deviceName:" << mA2dpDeviceName;
     if (mAudioAnalytics.mDeliverStatistics) {
         const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
-                CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
                 , ENUM_EXTRACT(inputDeviceBits)
                 , ENUM_EXTRACT(outputDeviceBits)
                 , mA2dpDeviceName.c_str()
@@ -1227,7 +1227,7 @@
                 );
         ALOGV("%s: statsd %s", __func__, str.c_str());
         mAudioAnalytics.mStatsdLog->log(
-                android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
+                stats::media_metrics::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
     }
 }
 
@@ -1355,10 +1355,10 @@
                     << "(" << sharingModeRequestedStr << ")";
 
     if (mAudioAnalytics.mDeliverStatistics) {
-        android::util::BytesField bf_serialized(
+        const stats::media_metrics::BytesField bf_serialized(
             serializedDeviceTypes.c_str(), serializedDeviceTypes.size());
         const auto result = sendToStatsd(
-                CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
                 , path
                 , direction
                 , framesPerBurst
@@ -1381,7 +1381,7 @@
         std::stringstream ss;
         ss << "result:" << result;
         const auto fieldsStr = printFields(AAudioStreamFields,
-                CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
                 , path
                 , direction
                 , framesPerBurst
@@ -1404,7 +1404,7 @@
         ss << " " << fieldsStr;
         std::string str = ss.str();
         ALOGV("%s: statsd %s", __func__, str.c_str());
-        mAudioAnalytics.mStatsdLog->log(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED, str);
+        mAudioAnalytics.mStatsdLog->log(stats::media_metrics::MEDIAMETRICS_AAUDIOSTREAM_REPORTED, str);
     }
 }
 
@@ -1544,12 +1544,12 @@
 // Classifies the setting event for statsd (use generated statsd enums.proto constants).
 static int32_t classifySettingEvent(bool isSetAlready, bool withinBoot) {
     if (isSetAlready) {
-        return util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__EVENT__SPATIALIZER_SETTING_EVENT_NORMAL;
+        return stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__EVENT__SPATIALIZER_SETTING_EVENT_NORMAL;
     }
     if (withinBoot) {
-        return util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__EVENT__SPATIALIZER_SETTING_EVENT_BOOT;
+        return stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__EVENT__SPATIALIZER_SETTING_EVENT_BOOT;
     }
-    return util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__EVENT__SPATIALIZER_SETTING_EVENT_FIRST;
+    return stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__EVENT__SPATIALIZER_SETTING_EVENT_FIRST;
 }
 
 void AudioAnalytics::Spatializer::onEvent(
@@ -1598,7 +1598,7 @@
                 types::channelMaskVectorFromString(channelMasks);
 
         const auto [ result, str ] = sendToStatsd(SpatializerCapabilitiesFields,
-                CONDITION(android::util::MEDIAMETRICS_SPATIALIZERCAPABILITIES_REPORTED)
+                CONDITION(stats::media_metrics::MEDIAMETRICS_SPATIALIZERCAPABILITIES_REPORTED)
                 , headTrackingModesVector
                 , levelsVector
                 , modesVector
@@ -1606,7 +1606,7 @@
                 );
 
         mAudioAnalytics.mStatsdLog->log(
-                android::util::MEDIAMETRICS_SPATIALIZERCAPABILITIES_REPORTED, str);
+                stats::media_metrics::MEDIAMETRICS_SPATIALIZERCAPABILITIES_REPORTED, str);
 
         std::lock_guard lg(mLock);
         if (mFirstCreateTimeNs == 0) {
@@ -1655,13 +1655,13 @@
                 deviceState.enabled = enabled;
                 const bool enabledStatsd = enabled == "true";
                 const auto [ result, str ] = sendToStatsd(SpatializerDeviceEnabledFields,
-                        CONDITION(android::util::MEDIAMETRICS_SPATIALIZERDEVICEENABLED_REPORTED)
+                        CONDITION(stats::media_metrics::MEDIAMETRICS_SPATIALIZERDEVICEENABLED_REPORTED)
                         , deviceTypeStatsd
                         , settingEventStatsd
                         , enabledStatsd
                         );
                 mAudioAnalytics.mStatsdLog->log(
-                        android::util::MEDIAMETRICS_SPATIALIZERDEVICEENABLED_REPORTED, str);
+                        stats::media_metrics::MEDIAMETRICS_SPATIALIZERDEVICEENABLED_REPORTED, str);
             }
         }
         if (!hasHeadTracker.empty()) {
@@ -1671,13 +1671,13 @@
                 deviceState.hasHeadTracker = hasHeadTracker;
                 const bool supportedStatsd = hasHeadTracker == "true";
                 const auto [ result, str ] = sendToStatsd(HeadTrackerDeviceSupportedFields,
-                        CONDITION(android::util::MEDIAMETRICS_HEADTRACKERDEVICESUPPORTED_REPORTED)
+                        CONDITION(stats::media_metrics::MEDIAMETRICS_HEADTRACKERDEVICESUPPORTED_REPORTED)
                         , deviceTypeStatsd
                         , settingEventStatsd
                         , supportedStatsd
                         );
                 mAudioAnalytics.mStatsdLog->log(
-                        android::util::MEDIAMETRICS_HEADTRACKERDEVICESUPPORTED_REPORTED, str);
+                        stats::media_metrics::MEDIAMETRICS_HEADTRACKERDEVICESUPPORTED_REPORTED, str);
             }
         }
         if (!headTrackerEnabled.empty()) {
@@ -1687,13 +1687,13 @@
                 deviceState.headTrackerEnabled = headTrackerEnabled;
                 const bool enabledStatsd = headTrackerEnabled == "true";
                 const auto [ result, str ] = sendToStatsd(HeadTrackerDeviceEnabledFields,
-                        CONDITION(android::util::MEDIAMETRICS_HEADTRACKERDEVICEENABLED_REPORTED)
+                        CONDITION(stats::media_metrics::MEDIAMETRICS_HEADTRACKERDEVICEENABLED_REPORTED)
                         , deviceTypeStatsd
                         , settingEventStatsd
                         , enabledStatsd
                         );
                 mAudioAnalytics.mStatsdLog->log(
-                        android::util::MEDIAMETRICS_HEADTRACKERDEVICEENABLED_REPORTED, str);
+                        stats::media_metrics::MEDIAMETRICS_HEADTRACKERDEVICEENABLED_REPORTED, str);
             }
         }
         mSimpleLog.log("%s deviceKey: %s item: %s",
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index 5787e9e..630a436 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -26,7 +26,7 @@
 #include <string>
 #include <audio_utils/clock.h>
 #include <cutils/properties.h>
-#include <statslog.h>
+#include <stats_media_metrics.h>
 #include <sys/timerfd.h>
 #include <system/audio.h>
 
@@ -164,7 +164,7 @@
     const int32_t duration_secs = (int32_t)(duration_ns / NANOS_PER_SECOND);
     const int32_t min_volume_duration_secs = (int32_t)(min_volume_duration_ns / NANOS_PER_SECOND);
     const int32_t max_volume_duration_secs = (int32_t)(max_volume_duration_ns / NANOS_PER_SECOND);
-    const int result = android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
+    const int result = stats::media_metrics::stats_write(stats::media_metrics::AUDIO_POWER_USAGE_DATA_REPORTED,
                                          audio_device,
                                          duration_secs,
                                          (float)volume,
@@ -177,7 +177,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_audio_power_usage_data_reported:"
-            << android::util::AUDIO_POWER_USAGE_DATA_REPORTED
+            << stats::media_metrics::AUDIO_POWER_USAGE_DATA_REPORTED
             << " audio_device:" << audio_device
             << " duration_secs:" << duration_secs
             << " average_volume:" << (float)volume
@@ -187,7 +187,7 @@
             << " max_volume_duration_secs:" << max_volume_duration_secs
             << " max_volume:" << (float)max_volume
             << " }";
-    mStatsdLog->log(android::util::AUDIO_POWER_USAGE_DATA_REPORTED, log.str());
+    mStatsdLog->log(stats::media_metrics::AUDIO_POWER_USAGE_DATA_REPORTED, log.str());
 }
 
 void AudioPowerUsage::updateMinMaxVolumeAndDuration(
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index d2b4aab..353ae12 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -18,7 +18,7 @@
 #include "MediaMetricsConstants.h"
 #include "StringUtils.h"
 #include <media/TypeConverter.h> // requires libmedia_helper to get the Audio code.
-#include <statslog.h>            // statsd
+#include <stats_media_metrics.h>            // statsd
 
 namespace android::mediametrics::types {
 
@@ -184,41 +184,41 @@
 const std::unordered_map<std::string, int32_t>& getAudioDeviceInfoTypeMap() {
     // DO NOT MODIFY VALUES (OK to add new ones).
     static std::unordered_map<std::string, int32_t> map{
-        {"unknown", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_UNKNOWN},
-        {"earpiece", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUILTIN_EARPIECE},
-        {"speaker", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUILTIN_SPEAKER},
-        {"headset", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_WIRED_HEADSET},
-        {"headphone", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_WIRED_HEADPHONES}, // sic
-        {"bt_sco", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_SCO},
-        {"bt_sco_hs", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_SCO},
-        {"bt_sco_carkit", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_SCO},
-        {"bt_a2dp", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_A2DP},
-        {"bt_a2dp_hp", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_A2DP},
-        {"bt_a2dp_spk", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_A2DP},
-        {"aux_digital", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI},
-        {"hdmi", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI},
-        {"analog_dock", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_DOCK},
-        {"digital_dock", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_DOCK},
-        {"usb_accessory", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_USB_ACCESSORY},
-        {"usb_device", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_USB_DEVICE},
-        {"usb_headset", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_USB_HEADSET},
-        {"remote_submix", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_REMOTE_SUBMIX},
-        {"telephony_tx", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_TELEPHONY},
-        {"line", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_LINE_ANALOG},
-        {"hdmi_arc", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI_ARC},
-        {"hdmi_earc", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI_EARC},
-        {"spdif", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_LINE_DIGITAL},
-        {"fm_transmitter", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_FM},
-        {"aux_line", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_AUX_LINE},
-        {"speaker_safe", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUILTIN_SPEAKER_SAFE},
-        {"ip", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_IP},
-        {"bus", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUS},
-        {"proxy", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_UNKNOWN /* AUDIO_DEVICE_INFO_TYPE_PROXY */},
-        {"hearing_aid_out", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HEARING_AID},
-        {"echo_canceller", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_ECHO_REFERENCE}, // sic
-        {"ble_headset", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLE_HEADSET},
-        {"ble_speaker", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLE_SPEAKER},
-        {"ble_broadcast", util::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLE_BROADCAST},
+        {"unknown", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_UNKNOWN},
+        {"earpiece", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUILTIN_EARPIECE},
+        {"speaker", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUILTIN_SPEAKER},
+        {"headset", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_WIRED_HEADSET},
+        {"headphone", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_WIRED_HEADPHONES}, // sic
+        {"bt_sco", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_SCO},
+        {"bt_sco_hs", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_SCO},
+        {"bt_sco_carkit", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_SCO},
+        {"bt_a2dp", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_A2DP},
+        {"bt_a2dp_hp", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_A2DP},
+        {"bt_a2dp_spk", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLUETOOTH_A2DP},
+        {"aux_digital", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI},
+        {"hdmi", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI},
+        {"analog_dock", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_DOCK},
+        {"digital_dock", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_DOCK},
+        {"usb_accessory", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_USB_ACCESSORY},
+        {"usb_device", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_USB_DEVICE},
+        {"usb_headset", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_USB_HEADSET},
+        {"remote_submix", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_REMOTE_SUBMIX},
+        {"telephony_tx", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_TELEPHONY},
+        {"line", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_LINE_ANALOG},
+        {"hdmi_arc", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI_ARC},
+        {"hdmi_earc", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HDMI_EARC},
+        {"spdif", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_LINE_DIGITAL},
+        {"fm_transmitter", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_FM},
+        {"aux_line", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_AUX_LINE},
+        {"speaker_safe", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUILTIN_SPEAKER_SAFE},
+        {"ip", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_IP},
+        {"bus", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BUS},
+        {"proxy", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_UNKNOWN /* AUDIO_DEVICE_INFO_TYPE_PROXY */},
+        {"hearing_aid_out", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_HEARING_AID},
+        {"echo_canceller", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_ECHO_REFERENCE}, // sic
+        {"ble_headset", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLE_HEADSET},
+        {"ble_speaker", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLE_SPEAKER},
+        {"ble_broadcast", stats::media_metrics::MEDIAMETRICS_SPATIALIZER_DEVICE_ENABLED_REPORTED__TYPE__AUDIO_DEVICE_INFO_TYPE_BLE_BROADCAST},
     };
     return map;
 }
@@ -324,23 +324,23 @@
     // DO NOT MODIFY VALUES(OK to add new ones).
     static std::unordered_map<std::string, int32_t> map {
         {"",
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__NO_ERROR},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__NO_ERROR},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_OK,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__NO_ERROR},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__NO_ERROR},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_ARGUMENT},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_ARGUMENT},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_IO,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_IO},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_IO},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_MEMORY},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_MEMORY},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_SECURITY},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_SECURITY},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_STATE},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_STATE},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_TIMEOUT},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_TIMEOUT},
         {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN,
-            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN},
+            stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN},
     };
     return map;
 }
@@ -664,7 +664,7 @@
     auto& map = getStatusMap();
     auto it = map.find(status);
     if (it == map.end()) {
-        return util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN;
+        return stats::media_metrics::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN;
     }
     return it->second;
 }
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index ceb3e6a..b4de4f4 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -33,7 +33,7 @@
 #include <mediautils/MemoryLeakTrackUtil.h>
 #include <memunreachable/memunreachable.h>
 #include <private/android_filesystem_config.h> // UID
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include <set>
 
@@ -546,7 +546,7 @@
     if (mStatsdRegistered.test_and_set()) {
         return;
     }
-    auto tag = android::util::MEDIA_DRM_ACTIVITY_INFO;
+    auto tag = stats::media_metrics::MEDIA_DRM_ACTIVITY_INFO;
     auto cb = MediaMetricsService::pullAtomCallback;
     AStatsManager_setPullAtomCallback(tag, /* metadata */ nullptr, cb, this);
 }
@@ -564,7 +564,7 @@
 std::string MediaMetricsService::atomTagToKey(int32_t atomTag)
 {
     switch (atomTag) {
-    case android::util::MEDIA_DRM_ACTIVITY_INFO:
+    case stats::media_metrics::MEDIA_DRM_ACTIVITY_INFO:
         return "mediadrm";
     }
     return {};
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
index 84d494e..8b33f10 100644
--- a/services/mediametrics/fuzzer/Android.bp
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -51,7 +51,7 @@
         "libprotobuf-cpp-lite",
         "libstagefright",
         "libstagefright_foundation",
-        "libstatslog",
+        "libstats_media_metrics",
         "libstatspull",
         "libstatssocket",
         "libutils",
diff --git a/services/mediametrics/iface_statsd.cpp b/services/mediametrics/iface_statsd.cpp
index 776f878..f64b3ec 100644
--- a/services/mediametrics/iface_statsd.cpp
+++ b/services/mediametrics/iface_statsd.cpp
@@ -37,8 +37,6 @@
 #include "MediaMetricsService.h"
 #include "iface_statsd.h"
 
-#include <statslog.h>
-
 namespace android {
 
 // set of routines that crack a mediametrics::Item
diff --git a/services/mediametrics/statsd_audiopolicy.cpp b/services/mediametrics/statsd_audiopolicy.cpp
index 3d9376e..9a9bc1d 100644
--- a/services/mediametrics/statsd_audiopolicy.cpp
+++ b/services/mediametrics/statsd_audiopolicy.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
@@ -107,15 +107,16 @@
         return false;
     }
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         bf_serialized);
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_audiopolicy_reported:"
-            << android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_AUDIOPOLICY_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -131,7 +132,7 @@
             << " active_session:" << active_session
             << " active_device:" << active_device
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_AUDIOPOLICY_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index 01adf7f..63c61ec 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "ValidateId.h"
@@ -147,8 +147,9 @@
     (void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
     const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_AUDIORECORD_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         bf_serialized,
@@ -156,7 +157,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_audiorecord_reported:"
-            << android::util::MEDIAMETRICS_AUDIORECORD_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_AUDIORECORD_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -181,7 +182,7 @@
 
             << " log_session_id:" << log_session_id
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_AUDIORECORD_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_audiothread.cpp b/services/mediametrics/statsd_audiothread.cpp
index e9b6dd6..3056605 100644
--- a/services/mediametrics/statsd_audiothread.cpp
+++ b/services/mediametrics/statsd_audiothread.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
@@ -188,15 +188,16 @@
         return false;
     }
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_AUDIOTHREAD_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         bf_serialized);
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_audiothread_reported:"
-            << android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_AUDIOTHREAD_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -231,7 +232,7 @@
             << " latency_mean_millis:" << latency_mean_millis
             << " latency_stddev_millis:" << latency_stddev_millis
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_AUDIOTHREAD_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 67514e9..1fc7fb4 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "ValidateId.h"
@@ -134,8 +134,9 @@
     (void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
     const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+                               stats::media_metrics::MEDIAMETRICS_AUDIOTRACK_REPORTED,
                                timestamp_nanos, package_name.c_str(), package_version_code,
                                media_apex_version,
                                bf_serialized,
@@ -143,7 +144,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_audiotrack_reported:"
-            << android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_AUDIOTRACK_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -164,7 +165,7 @@
 
             << " log_session_id:" << log_session_id
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_AUDIOTRACK_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index a737ba0..c5957e9 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 #include <stats_event.h>
 
 #include "cleaner.h"
@@ -46,7 +46,7 @@
     if (item == nullptr) return false;
 
     AStatsEvent* event = AStatsEvent_obtain();
-    AStatsEvent_setAtomId(event, android::util::MEDIA_CODEC_REPORTED);
+    AStatsEvent_setAtomId(event, stats::media_metrics::MEDIA_CODEC_REPORTED);
 
     const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
     AStatsEvent_writeInt64(event, timestamp_nanos);
@@ -455,8 +455,8 @@
         ALOGE("Failed to serialize codec metrics");
         return false;
     }
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_CODEC_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(stats::media_metrics::MEDIAMETRICS_CODEC_REPORTED,
                                timestamp_nanos, package_name.c_str(), package_version_code,
                                media_apex_version,
                                bf_serialized);
@@ -464,7 +464,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_codec_reported:"
-            << android::util::MEDIAMETRICS_CODEC_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_CODEC_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -525,7 +525,7 @@
             << " original_qp_b_min:" << qpBMinOri
             << " original_qp_b_max:" << qpBMaxOri
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_CODEC_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_CODEC_REPORTED, log.str());
 
 
     return true;
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index e06a605..8769efb 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -35,7 +35,7 @@
 #include "StringUtils.h"
 #include "iface_statsd.h"
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include <array>
 #include <string>
@@ -69,8 +69,9 @@
     // This field is left here for backward compatibility.
     // This field is not used anymore.
     const std::string  kUnusedField("");
-    android::util::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_MEDIADRM_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         vendor.c_str(),
@@ -80,7 +81,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_mediadrm_reported:"
-            << android::util::MEDIAMETRICS_MEDIADRM_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_MEDIADRM_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -90,7 +91,7 @@
             << " description:" << description
             // omitting serialized
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_MEDIADRM_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_MEDIADRM_REPORTED, log.str());
     return true;
 }
 
@@ -122,7 +123,8 @@
         item->getInt64(("method"s + std::to_string(i)).c_str(), &methodCounts[i]);
     }
 
-    const int result = android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
+    const int result = stats::media_metrics::stats_write(
+                               stats::media_metrics::MEDIAMETRICS_DRMMANAGER_REPORTED,
                                timestamp_nanos, package_name.c_str(), package_version_code,
                                media_apex_version,
                                plugin_id.c_str(), description.c_str(),
@@ -136,7 +138,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_drmmanager_reported:"
-            << android::util::MEDIAMETRICS_DRMMANAGER_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_DRMMANAGER_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -151,7 +153,7 @@
         log << " method_" << i << ":" << methodCounts[i];
     }
     log << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_DRMMANAGER_REPORTED, log.str());
     return true;
 }
 
@@ -207,7 +209,7 @@
 
     // Memory for |event| is internally managed by statsd.
     AStatsEvent* event = AStatsEventList_addStatsEvent(out);
-    AStatsEvent_setAtomId(event, android::util::MEDIA_DRM_ACTIVITY_INFO);
+    AStatsEvent_setAtomId(event, stats::media_metrics::MEDIA_DRM_ACTIVITY_INFO);
     AStatsEvent_writeString(event, item->getPkgName().c_str());
     AStatsEvent_writeInt64(event, item->getPkgVersionCode());
     AStatsEvent_writeString(event, vendor.c_str());
@@ -219,7 +221,7 @@
     std::stringstream log;
     log << "pulled:" << " {"
             << " media_drm_activity_info:"
-            << android::util::MEDIA_DRM_ACTIVITY_INFO
+            << stats::media_metrics::MEDIA_DRM_ACTIVITY_INFO
             << " package_name:" << item->getPkgName()
             << " package_version_code:" << item->getPkgVersionCode()
             << " vendor:" << vendor
@@ -227,7 +229,7 @@
             << " framework_metrics:" << mediametrics::stringutils::bytesToString(framework_raw, 8)
             << " vendor_metrics:" <<  mediametrics::stringutils::bytesToString(plugin_raw, 8)
             << " }";
-    statsdLog->log(android::util::MEDIA_DRM_ACTIVITY_INFO, log.str());
+    statsdLog->log(stats::media_metrics::MEDIA_DRM_ACTIVITY_INFO, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index a8bfeaa..9345df6 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "ValidateId.h"
@@ -96,15 +96,16 @@
         return false;
     }
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_EXTRACTOR_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         bf_serialized);
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_extractor_reported:"
-            << android::util::MEDIAMETRICS_EXTRACTOR_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_EXTRACTOR_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -116,7 +117,7 @@
             << " entry_point:" << entry_point_string << "(" << entry_point << ")"
             << " log_session_id:" << log_session_id
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_EXTRACTOR_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 67ca874b..458bd32 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -28,7 +28,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "ValidateId.h"
@@ -83,7 +83,8 @@
     item->getString("android.media.mediaparser.logSessionId", &logSessionId);
     logSessionId = mediametrics::ValidateId::get()->validateId(logSessionId);
 
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
+    const int result = stats::media_metrics::stats_write(
+                               stats::media_metrics::MEDIAMETRICS_MEDIAPARSER_REPORTED,
                                timestamp_nanos,
                                package_name.c_str(),
                                package_version_code,
@@ -103,7 +104,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_mediaparser_reported:"
-            << android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_MEDIAPARSER_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -120,7 +121,7 @@
             << " video_height:" << videoHeight
             << " log_session_id:" << logSessionId
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_MEDIAPARSER_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_nuplayer.cpp b/services/mediametrics/statsd_nuplayer.cpp
index bdee1f2..fd545f4 100644
--- a/services/mediametrics/statsd_nuplayer.cpp
+++ b/services/mediametrics/statsd_nuplayer.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
@@ -153,8 +153,9 @@
         return false;
     }
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_NUPLAYER_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_NUPLAYER_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         bf_serialized);
@@ -162,7 +163,7 @@
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_nuplayer_reported:"
-            << android::util::MEDIAMETRICS_NUPLAYER_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_NUPLAYER_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -193,7 +194,7 @@
             // TODO NuPlayer - add log_session_id
             // << " log_session_id:" << log_session_id
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_NUPLAYER_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_NUPLAYER_REPORTED, log.str());
     return true;
 }
 
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index 5f54a68..efa284b 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -29,7 +29,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <statslog.h>
+#include <stats_media_metrics.h>
 
 #include "MediaMetricsService.h"
 #include "ValidateId.h"
@@ -179,15 +179,16 @@
         return false;
     }
 
-    android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
-    int result = android::util::stats_write(android::util::MEDIAMETRICS_RECORDER_REPORTED,
+    const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+    const int result = stats::media_metrics::stats_write(
+        stats::media_metrics::MEDIAMETRICS_RECORDER_REPORTED,
         timestamp_nanos, package_name.c_str(), package_version_code,
         media_apex_version,
         bf_serialized);
     std::stringstream log;
     log << "result:" << result << " {"
             << " mediametrics_recorder_reported:"
-            << android::util::MEDIAMETRICS_RECORDER_REPORTED
+            << stats::media_metrics::MEDIAMETRICS_RECORDER_REPORTED
             << " timestamp_nanos:" << timestamp_nanos
             << " package_name:" << package_name
             << " package_version_code:" << package_version_code
@@ -218,7 +219,7 @@
             << " iframe_interval:" << iframe_interval
             << " log_session_id:" << log_session_id
             << " }";
-    statsdLog->log(android::util::MEDIAMETRICS_RECORDER_REPORTED, log.str());
+    statsdLog->log(stats::media_metrics::MEDIAMETRICS_RECORDER_REPORTED, log.str());
     return true;
 }
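
Editorial note: every statsd_*.cpp hunk above is the same mechanical migration: the include moves from <statslog.h> to <stats_media_metrics.h>, and BytesField, stats_write(), and the MEDIAMETRICS_*_REPORTED atom constants move from the android::util namespace to stats::media_metrics. A minimal sketch of the resulting call shape, using the recorder atom and the same argument order as the hunk above; it compiles only in-tree against the generated stats_media_metrics.h header, and the wrapper function name is illustrative only:

    #include <cstdint>
    #include <string>

    #include <stats_media_metrics.h>  // replaces <statslog.h> after the migration

    // Illustrative helper, not part of the change: pushes an already-serialized
    // protobuf payload to statsd using the relocated symbols.
    static int reportSerializedAtom(int64_t timestamp_nanos,
                                    const std::string& package_name,
                                    int64_t package_version_code,
                                    int64_t media_apex_version,
                                    const std::string& serialized) {
        // BytesField wraps the serialized payload, exactly as the hunks above do.
        const stats::media_metrics::BytesField bf_serialized(serialized.c_str(),
                                                             serialized.size());
        // stats_write() and the atom constant now live under stats::media_metrics.
        return stats::media_metrics::stats_write(
                stats::media_metrics::MEDIAMETRICS_RECORDER_REPORTED,
                timestamp_nanos, package_name.c_str(), package_version_code,
                media_apex_version, bf_serialized);
    }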
 
diff --git a/services/tuner/Android.bp b/services/tuner/Android.bp
index 9c4f9a5..0649061 100644
--- a/services/tuner/Android.bp
+++ b/services/tuner/Android.bp
@@ -15,7 +15,7 @@
     imports: [
         "android.hardware.common-V2",
         "android.hardware.common.fmq-V1",
-        "android.hardware.tv.tuner-V1",
+        "android.hardware.tv.tuner-V2",
     ],
     backend: {
         java: {
@@ -41,7 +41,7 @@
     shared_libs: [
         "android.hardware.tv.tuner@1.0",
         "android.hardware.tv.tuner@1.1",
-        "android.hardware.tv.tuner-V1-ndk",
+        "android.hardware.tv.tuner-V2-ndk",
         "libbase",
         "libbinder",
         "libbinder_ndk",
@@ -84,7 +84,7 @@
     shared_libs: [
         "android.hardware.tv.tuner@1.0",
         "android.hardware.tv.tuner@1.1",
-        "android.hardware.tv.tuner-V1-ndk",
+        "android.hardware.tv.tuner-V2-ndk",
         "libbase",
         "libbinder",
         "libfmq",
diff --git a/services/tuner/TunerDvr.cpp b/services/tuner/TunerDvr.cpp
index 9a35db8..a0abc92 100644
--- a/services/tuner/TunerDvr.cpp
+++ b/services/tuner/TunerDvr.cpp
@@ -94,6 +94,21 @@
     return mDvr->close();
 }
 
+::ndk::ScopedAStatus TunerDvr::setStatusCheckIntervalHint(const int64_t milliseconds) {
+    if (milliseconds < 0L) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    ::ndk::ScopedAStatus s = mDvr->setStatusCheckIntervalHint(milliseconds);
+    if (s.getStatus() == STATUS_UNKNOWN_TRANSACTION) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return s;
+}
+
 /////////////// IDvrCallback ///////////////////////
 ::ndk::ScopedAStatus TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
     if (mTunerDvrCallback != nullptr) {
diff --git a/services/tuner/TunerDvr.h b/services/tuner/TunerDvr.h
index 1854d08..2330e7b 100644
--- a/services/tuner/TunerDvr.h
+++ b/services/tuner/TunerDvr.h
@@ -61,6 +61,7 @@
     ::ndk::ScopedAStatus stop() override;
     ::ndk::ScopedAStatus flush() override;
     ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus setStatusCheckIntervalHint(int64_t in_milliseconds) override;
 
     struct DvrCallback : public BnDvrCallback {
         DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
index 2c01c4e..cafe075 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
@@ -66,4 +66,9 @@
      * close the DVR instance to release resource for DVR.
      */
     void close();
+
+    /**
+     * Set status check time interval.
+     */
+    void setStatusCheckIntervalHint(in long milliseconds);
 }
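
Editorial note: a hypothetical caller-side sketch of the new method, not part of this change. It assumes the generated NDK headers for the android.media.tv.tuner AIDL package and shows how a client can tolerate older HALs: TunerDvr.cpp above maps an unknown transaction to Result::UNAVAILABLE, and TunerHidlDvr.cpp below reports UNAVAILABLE unconditionally.

    #include <cstdint>
    #include <memory>

    #include <aidl/android/media/tv/tuner/ITunerDvr.h>  // assumed generated NDK header path
    #include <android/binder_auto_utils.h>

    using ::aidl::android::media::tv::tuner::ITunerDvr;

    // Illustrative only: apply the hint and fall back silently on older HALs.
    void applyStatusCheckHint(const std::shared_ptr<ITunerDvr>& dvr, int64_t intervalMs) {
        const ::ndk::ScopedAStatus status = dvr->setStatusCheckIntervalHint(intervalMs);
        if (!status.isOk()) {
            // Pre-V2 tuner HALs do not implement the hint; the service surfaces
            // Result::UNAVAILABLE, so callers keep the default status polling cadence.
        }
    }
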
diff --git a/services/tuner/hidl/TunerHidlDvr.cpp b/services/tuner/hidl/TunerHidlDvr.cpp
index 50d92de..8083a6e 100644
--- a/services/tuner/hidl/TunerHidlDvr.cpp
+++ b/services/tuner/hidl/TunerHidlDvr.cpp
@@ -148,6 +148,11 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
+::ndk::ScopedAStatus TunerHidlDvr::setStatusCheckIntervalHint(int64_t /* in_milliseconds */) {
+    HidlResult res = HidlResult::UNAVAILABLE;
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
 HidlDvrSettings TunerHidlDvr::getHidlDvrSettings(const DvrSettings& settings) {
     HidlDvrSettings s;
     switch (mType) {
diff --git a/services/tuner/hidl/TunerHidlDvr.h b/services/tuner/hidl/TunerHidlDvr.h
index a280ff7..aa86b14 100644
--- a/services/tuner/hidl/TunerHidlDvr.h
+++ b/services/tuner/hidl/TunerHidlDvr.h
@@ -63,6 +63,7 @@
     ::ndk::ScopedAStatus stop() override;
     ::ndk::ScopedAStatus flush() override;
     ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus setStatusCheckIntervalHint(int64_t in_milliseconds) override;
 
     struct DvrCallback : public HidlIDvrCallback {
         DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)